diff -Nru protobuf-2.6.1/debian/changelog protobuf-2.6.1/debian/changelog --- protobuf-2.6.1/debian/changelog 2015-08-26 20:38:00.000000000 +0000 +++ protobuf-2.6.1/debian/changelog 2018-01-10 12:54:37.000000000 +0000 @@ -1,3 +1,9 @@ +protobuf (2.6.1-1.3ubuntu1) UNRELEASED; urgency=medium + + * Build python3 package. + + -- Andrea Azzarone Wed, 10 Jan 2018 12:54:15 +0000 + protobuf (2.6.1-1.3) unstable; urgency=medium * Non-maintainer upload. diff -Nru protobuf-2.6.1/debian/control protobuf-2.6.1/debian/control --- protobuf-2.6.1/debian/control 2015-08-06 06:47:50.000000000 +0000 +++ protobuf-2.6.1/debian/control 2018-01-10 12:42:06.000000000 +0000 @@ -17,6 +17,10 @@ , libpython-all-dev (>= 2.7) , python-setuptools , python-google-apputils + , python3-all (>= 3.3) + , libpython3-all-dev (>= 3.3) + , python3-setuptools + , python3-google-apputils # Manpage generator , xmlto # Tests @@ -181,6 +185,27 @@ need the protoc tool (in the protobuf-compiler package) to compile your definition to Python classes, and then the modules in this package will allow you to use those classes in your programs. + +Package: python3-protobuf +Architecture: any +Section: python +Depends: ${shlibs:Depends}, ${python3:Depends}, ${misc:Depends} +Description: Python 3 bindings for protocol buffers + Protocol buffers are a flexible, efficient, automated mechanism for + serializing structured data - similar to XML, but smaller, faster, and + simpler. You define how you want your data to be structured once, then you can + use special generated source code to easily write and read your structured + data to and from a variety of data streams and using a variety of languages. + You can even update your data structure without breaking deployed programs + that are compiled against the "old" format. + . + Google uses Protocol Buffers for almost all of its internal RPC protocols and + file formats. + . + This package contains the Python 3 bindings for the protocol buffers. 
You will + need the protoc tool (in the protobuf-compiler package) to compile your + definition to Python classes, and then the modules in this package will allow + you to use those classes in your programs. Package: libprotobuf-java Architecture: all diff -Nru protobuf-2.6.1/debian/patches/add-python3.patch protobuf-2.6.1/debian/patches/add-python3.patch --- protobuf-2.6.1/debian/patches/add-python3.patch 1970-01-01 00:00:00.000000000 +0000 +++ protobuf-2.6.1/debian/patches/add-python3.patch 2018-01-10 12:46:45.000000000 +0000 @@ -0,0 +1,25629 @@ +Description: Build python3 package. +Author: Andrea Azzarone +Bug-Ubuntu: https://bugs.launchpad.net/ubuntu/+source/protobuf/+bug/1735160 +Last-Update: 2018-01-10 + +--- /dev/null ++++ protobuf-2.6.1/python3/README.txt +@@ -0,0 +1,105 @@ ++Protocol Buffers - Google's data interchange format ++Copyright 2008 Google Inc. ++ ++This directory contains the Python Protocol Buffers runtime library. ++ ++Normally, this directory comes as part of the protobuf package, available ++from: ++ ++ https://developers.google.com/protocol-buffers/ ++ ++The complete package includes the C++ source code, which includes the ++Protocol Compiler (protoc). If you downloaded this package from PyPI ++or some other Python-specific source, you may have received only the ++Python part of the code. In this case, you will need to obtain the ++Protocol Compiler from some other source before you can use this ++package. ++ ++Development Warning ++=================== ++ ++The Python implementation of Protocol Buffers is not as mature as the C++ ++and Java implementations. It may be more buggy, and it is known to be ++pretty slow at this time. If you would like to help fix these issues, ++join the Protocol Buffers discussion list and let us know! ++ ++Installation ++============ ++ ++1) Make sure you have Python 2.4 or newer. 
If in doubt, run: ++ ++ $ python -V ++ ++2) If you do not have setuptools installed, note that it will be ++ downloaded and installed automatically as soon as you run setup.py. ++ If you would rather install it manually, you may do so by following ++ the instructions on this page: ++ ++ http://peak.telecommunity.com/DevCenter/EasyInstall#installation-instructions ++ ++3) Build the C++ code, or install a binary distribution of protoc. If ++ you install a binary distribution, make sure that it is the same ++ version as this package. If in doubt, run: ++ ++ $ protoc --version ++ ++4) Build and run the tests: ++ ++ $ python setup.py build ++ $ python setup.py google_test ++ ++ If you want to test c++ implementation, run: ++ $ python setup.py test --cpp_implementation ++ ++ If some tests fail, this library may not work correctly on your ++ system. Continue at your own risk. ++ ++ Please note that there is a known problem with some versions of ++ Python on Cygwin which causes the tests to fail after printing the ++ error: "sem_init: Resource temporarily unavailable". This appears ++ to be a bug either in Cygwin or in Python: ++ http://www.cygwin.com/ml/cygwin/2005-07/msg01378.html ++ We do not know if or when it might me fixed. We also do not know ++ how likely it is that this bug will affect users in practice. ++ ++5) Install: ++ ++ $ python setup.py install ++ or: ++ $ python setup.py install --cpp_implementation ++ ++ This step may require superuser privileges. ++ NOTE: To use C++ implementation, you need to install C++ protobuf runtime ++ library of the same version and export the environment variable before this ++ step. See the "C++ Implementation" section below for more details. 
++ ++Usage ++===== ++ ++The complete documentation for Protocol Buffers is available via the ++web at: ++ ++ https://developers.google.com/protocol-buffers/ ++ ++C++ Implementation ++================== ++ ++The C++ implementation for Python messages is built as a Python extension to ++improve the overall protobuf Python performance. ++ ++To use the C++ implementation, you need to: ++1) Install the C++ protobuf runtime library, please see instructions in the ++ parent directory. ++2) Export an environment variable: ++ ++ $ export PROTOCOL_BUFFERS_PYTHON_IMPLEMENTATION=cpp ++ $ export PROTOCOL_BUFFERS_PYTHON_IMPLEMENTATION_VERSION=2 ++ ++You need to export this variable before running setup.py script to build and ++install the extension. You must also set the variable at runtime, otherwise ++the pure-Python implementation will be used. In a future release, we will ++change the default so that C++ implementation is used whenever it is available. ++It is strongly recommended to run `python setup.py test` after setting the ++variable to "cpp", so the tests will be against C++ implemented Python ++messages. ++ +--- /dev/null ++++ protobuf-2.6.1/python3/ez_setup.py +@@ -0,0 +1,284 @@ ++#!python ++ ++# This file was obtained from: ++# http://peak.telecommunity.com/dist/ez_setup.py ++# on 2011/1/21. ++ ++"""Bootstrap setuptools installation ++ ++If you want to use setuptools in your package's setup.py, just include this ++file in the same directory with it, and add this to the top of your setup.py:: ++ ++ from ez_setup import use_setuptools ++ use_setuptools() ++ ++If you want to require a specific version of setuptools, set a download ++mirror, or use an alternate download directory, you can do so by supplying ++the appropriate options to ``use_setuptools()``. ++ ++This file can also be run as a script to install or upgrade setuptools. 
++""" ++import sys ++DEFAULT_VERSION = "0.6c11" ++DEFAULT_URL = "http://pypi.python.org/packages/%s/s/setuptools/" % sys.version[:3] ++ ++md5_data = { ++ 'setuptools-0.6b1-py2.3.egg': '8822caf901250d848b996b7f25c6e6ca', ++ 'setuptools-0.6b1-py2.4.egg': 'b79a8a403e4502fbb85ee3f1941735cb', ++ 'setuptools-0.6b2-py2.3.egg': '5657759d8a6d8fc44070a9d07272d99b', ++ 'setuptools-0.6b2-py2.4.egg': '4996a8d169d2be661fa32a6e52e4f82a', ++ 'setuptools-0.6b3-py2.3.egg': 'bb31c0fc7399a63579975cad9f5a0618', ++ 'setuptools-0.6b3-py2.4.egg': '38a8c6b3d6ecd22247f179f7da669fac', ++ 'setuptools-0.6b4-py2.3.egg': '62045a24ed4e1ebc77fe039aa4e6f7e5', ++ 'setuptools-0.6b4-py2.4.egg': '4cb2a185d228dacffb2d17f103b3b1c4', ++ 'setuptools-0.6c1-py2.3.egg': 'b3f2b5539d65cb7f74ad79127f1a908c', ++ 'setuptools-0.6c1-py2.4.egg': 'b45adeda0667d2d2ffe14009364f2a4b', ++ 'setuptools-0.6c10-py2.3.egg': 'ce1e2ab5d3a0256456d9fc13800a7090', ++ 'setuptools-0.6c10-py2.4.egg': '57d6d9d6e9b80772c59a53a8433a5dd4', ++ 'setuptools-0.6c10-py2.5.egg': 'de46ac8b1c97c895572e5e8596aeb8c7', ++ 'setuptools-0.6c10-py2.6.egg': '58ea40aef06da02ce641495523a0b7f5', ++ 'setuptools-0.6c11-py2.3.egg': '2baeac6e13d414a9d28e7ba5b5a596de', ++ 'setuptools-0.6c11-py2.4.egg': 'bd639f9b0eac4c42497034dec2ec0c2b', ++ 'setuptools-0.6c11-py2.5.egg': '64c94f3bf7a72a13ec83e0b24f2749b2', ++ 'setuptools-0.6c11-py2.6.egg': 'bfa92100bd772d5a213eedd356d64086', ++ 'setuptools-0.6c2-py2.3.egg': 'f0064bf6aa2b7d0f3ba0b43f20817c27', ++ 'setuptools-0.6c2-py2.4.egg': '616192eec35f47e8ea16cd6a122b7277', ++ 'setuptools-0.6c3-py2.3.egg': 'f181fa125dfe85a259c9cd6f1d7b78fa', ++ 'setuptools-0.6c3-py2.4.egg': 'e0ed74682c998bfb73bf803a50e7b71e', ++ 'setuptools-0.6c3-py2.5.egg': 'abef16fdd61955514841c7c6bd98965e', ++ 'setuptools-0.6c4-py2.3.egg': 'b0b9131acab32022bfac7f44c5d7971f', ++ 'setuptools-0.6c4-py2.4.egg': '2a1f9656d4fbf3c97bf946c0a124e6e2', ++ 'setuptools-0.6c4-py2.5.egg': '8f5a052e32cdb9c72bcf4b5526f28afc', ++ 'setuptools-0.6c5-py2.3.egg': 
'ee9fd80965da04f2f3e6b3576e9d8167', ++ 'setuptools-0.6c5-py2.4.egg': 'afe2adf1c01701ee841761f5bcd8aa64', ++ 'setuptools-0.6c5-py2.5.egg': 'a8d3f61494ccaa8714dfed37bccd3d5d', ++ 'setuptools-0.6c6-py2.3.egg': '35686b78116a668847237b69d549ec20', ++ 'setuptools-0.6c6-py2.4.egg': '3c56af57be3225019260a644430065ab', ++ 'setuptools-0.6c6-py2.5.egg': 'b2f8a7520709a5b34f80946de5f02f53', ++ 'setuptools-0.6c7-py2.3.egg': '209fdf9adc3a615e5115b725658e13e2', ++ 'setuptools-0.6c7-py2.4.egg': '5a8f954807d46a0fb67cf1f26c55a82e', ++ 'setuptools-0.6c7-py2.5.egg': '45d2ad28f9750e7434111fde831e8372', ++ 'setuptools-0.6c8-py2.3.egg': '50759d29b349db8cfd807ba8303f1902', ++ 'setuptools-0.6c8-py2.4.egg': 'cba38d74f7d483c06e9daa6070cce6de', ++ 'setuptools-0.6c8-py2.5.egg': '1721747ee329dc150590a58b3e1ac95b', ++ 'setuptools-0.6c9-py2.3.egg': 'a83c4020414807b496e4cfbe08507c03', ++ 'setuptools-0.6c9-py2.4.egg': '260a2be2e5388d66bdaee06abec6342a', ++ 'setuptools-0.6c9-py2.5.egg': 'fe67c3e5a17b12c0e7c541b7ea43a8e6', ++ 'setuptools-0.6c9-py2.6.egg': 'ca37b1ff16fa2ede6e19383e7b59245a', ++} ++ ++import sys, os ++try: from hashlib import md5 ++except ImportError: from md5 import md5 ++ ++def _validate_md5(egg_name, data): ++ if egg_name in md5_data: ++ digest = md5(data).hexdigest() ++ if digest != md5_data[egg_name]: ++ print >>sys.stderr, ( ++ "md5 validation of %s failed! (Possible download problem?)" ++ % egg_name ++ ) ++ sys.exit(2) ++ return data ++ ++def use_setuptools( ++ version=DEFAULT_VERSION, download_base=DEFAULT_URL, to_dir=os.curdir, ++ download_delay=15 ++): ++ """Automatically find/download setuptools and make it available on sys.path ++ ++ `version` should be a valid setuptools version number that is available ++ as an egg for download under the `download_base` URL (which should end with ++ a '/'). `to_dir` is the directory where setuptools will be downloaded, if ++ it is not already available. 
If `download_delay` is specified, it should ++ be the number of seconds that will be paused before initiating a download, ++ should one be required. If an older version of setuptools is installed, ++ this routine will print a message to ``sys.stderr`` and raise SystemExit in ++ an attempt to abort the calling script. ++ """ ++ was_imported = 'pkg_resources' in sys.modules or 'setuptools' in sys.modules ++ def do_download(): ++ egg = download_setuptools(version, download_base, to_dir, download_delay) ++ sys.path.insert(0, egg) ++ import setuptools; setuptools.bootstrap_install_from = egg ++ try: ++ import pkg_resources ++ except ImportError: ++ return do_download() ++ try: ++ return do_download() ++ pkg_resources.require("setuptools>="+version); return ++ except pkg_resources.VersionConflict, e: ++ if was_imported: ++ print >>sys.stderr, ( ++ "The required version of setuptools (>=%s) is not available, and\n" ++ "can't be installed while this script is running. Please install\n" ++ " a more recent version first, using 'easy_install -U setuptools'." ++ "\n\n(Currently using %r)" ++ ) % (version, e.args[0]) ++ sys.exit(2) ++ except pkg_resources.DistributionNotFound: ++ pass ++ ++ del pkg_resources, sys.modules['pkg_resources'] # reload ok ++ return do_download() ++ ++def download_setuptools( ++ version=DEFAULT_VERSION, download_base=DEFAULT_URL, to_dir=os.curdir, ++ delay = 15 ++): ++ """Download setuptools from a specified location and return its filename ++ ++ `version` should be a valid setuptools version number that is available ++ as an egg for download under the `download_base` URL (which should end ++ with a '/'). `to_dir` is the directory where the egg will be downloaded. ++ `delay` is the number of seconds to pause before an actual download attempt. 
++ """ ++ import urllib2, shutil ++ egg_name = "setuptools-%s-py%s.egg" % (version,sys.version[:3]) ++ url = download_base + egg_name ++ saveto = os.path.join(to_dir, egg_name) ++ src = dst = None ++ if not os.path.exists(saveto): # Avoid repeated downloads ++ try: ++ from distutils import log ++ if delay: ++ log.warn(""" ++--------------------------------------------------------------------------- ++This script requires setuptools version %s to run (even to display ++help). I will attempt to download it for you (from ++%s), but ++you may need to enable firewall access for this script first. ++I will start the download in %d seconds. ++ ++(Note: if this machine does not have network access, please obtain the file ++ ++ %s ++ ++and place it in this directory before rerunning this script.) ++---------------------------------------------------------------------------""", ++ version, download_base, delay, url ++ ); from time import sleep; sleep(delay) ++ log.warn("Downloading %s", url) ++ src = urllib2.urlopen(url) ++ # Read/write all in one block, so we don't create a corrupt file ++ # if the download is interrupted. ++ data = _validate_md5(egg_name, src.read()) ++ dst = open(saveto,"wb"); dst.write(data) ++ finally: ++ if src: src.close() ++ if dst: dst.close() ++ return os.path.realpath(saveto) ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++def main(argv, version=DEFAULT_VERSION): ++ """Install or upgrade setuptools and EasyInstall""" ++ try: ++ import setuptools ++ except ImportError: ++ egg = None ++ try: ++ egg = download_setuptools(version, delay=0) ++ sys.path.insert(0,egg) ++ from setuptools.command.easy_install import main ++ return main(list(argv)+[egg]) # we're done here ++ finally: ++ if egg and os.path.exists(egg): ++ os.unlink(egg) ++ else: ++ if setuptools.__version__ == '0.0.1': ++ print >>sys.stderr, ( ++ "You have an obsolete version of setuptools installed. 
Please\n" ++ "remove it from your system entirely before rerunning this script." ++ ) ++ sys.exit(2) ++ ++ req = "setuptools>="+version ++ import pkg_resources ++ try: ++ pkg_resources.require(req) ++ except pkg_resources.VersionConflict: ++ try: ++ from setuptools.command.easy_install import main ++ except ImportError: ++ from easy_install import main ++ main(list(argv)+[download_setuptools(delay=0)]) ++ sys.exit(0) # try to force an exit ++ else: ++ if argv: ++ from setuptools.command.easy_install import main ++ main(argv) ++ else: ++ print "Setuptools version",version,"or greater has been installed." ++ print '(Run "ez_setup.py -U setuptools" to reinstall or upgrade.)' ++ ++def update_md5(filenames): ++ """Update our built-in md5 registry""" ++ ++ import re ++ ++ for name in filenames: ++ base = os.path.basename(name) ++ f = open(name,'rb') ++ md5_data[base] = md5(f.read()).hexdigest() ++ f.close() ++ ++ data = [" %r: %r,\n" % it for it in md5_data.items()] ++ data.sort() ++ repl = "".join(data) ++ ++ import inspect ++ srcfile = inspect.getsourcefile(sys.modules[__name__]) ++ f = open(srcfile, 'rb'); src = f.read(); f.close() ++ ++ match = re.search("\nmd5_data = {\n([^}]+)}", src) ++ if not match: ++ print >>sys.stderr, "Internal error!" ++ sys.exit(2) ++ ++ src = src[:match.start(1)] + repl + src[match.end(1):] ++ f = open(srcfile,'w') ++ f.write(src) ++ f.close() ++ ++ ++if __name__=='__main__': ++ if len(sys.argv)>2 and sys.argv[1]=='--md5update': ++ update_md5(sys.argv[2:]) ++ else: ++ main(sys.argv[1:]) +--- /dev/null ++++ protobuf-2.6.1/python3/google/__init__.py +@@ -0,0 +1 @@ ++__import__('pkg_resources').declare_namespace(__name__) +--- /dev/null ++++ protobuf-2.6.1/python3/google/protobuf/descriptor.py +@@ -0,0 +1,847 @@ ++# Protocol Buffers - Google's data interchange format ++# Copyright 2008 Google Inc. All rights reserved. 
++# https://developers.google.com/protocol-buffers/ ++# ++# Redistribution and use in source and binary forms, with or without ++# modification, are permitted provided that the following conditions are ++# met: ++# ++# * Redistributions of source code must retain the above copyright ++# notice, this list of conditions and the following disclaimer. ++# * Redistributions in binary form must reproduce the above ++# copyright notice, this list of conditions and the following disclaimer ++# in the documentation and/or other materials provided with the ++# distribution. ++# * Neither the name of Google Inc. nor the names of its ++# contributors may be used to endorse or promote products derived from ++# this software without specific prior written permission. ++# ++# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS ++# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT ++# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR ++# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT ++# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, ++# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT ++# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, ++# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY ++# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT ++# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE ++# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. ++ ++# Copyright 2007 Google Inc. All Rights Reserved. ++ ++"""Descriptors essentially contain exactly the information found in a .proto ++file, in types that make this information accessible in Python. 
++""" ++ ++__author__ = 'robinson@google.com (Will Robinson)' ++ ++from google.protobuf.internal import api_implementation ++ ++ ++if api_implementation.Type() == 'cpp': ++ # Used by MakeDescriptor in cpp mode ++ import os ++ import uuid ++ ++ if api_implementation.Version() == 2: ++ from google.protobuf.pyext import _message ++ else: ++ from google.protobuf.internal import cpp_message ++ ++ ++class Error(Exception): ++ """Base error for this module.""" ++ ++ ++class TypeTransformationError(Error): ++ """Error transforming between python proto type and corresponding C++ type.""" ++ ++ ++class DescriptorBase(object): ++ ++ """Descriptors base class. ++ ++ This class is the base of all descriptor classes. It provides common options ++ related functionaility. ++ ++ Attributes: ++ has_options: True if the descriptor has non-default options. Usually it ++ is not necessary to read this -- just call GetOptions() which will ++ happily return the default instance. However, it's sometimes useful ++ for efficiency, and also useful inside the protobuf implementation to ++ avoid some bootstrapping issues. ++ """ ++ ++ def __init__(self, options, options_class_name): ++ """Initialize the descriptor given its options message and the name of the ++ class of the options message. The name of the class is required in case ++ the options message is None and has to be created. ++ """ ++ self._options = options ++ self._options_class_name = options_class_name ++ ++ # Does this descriptor have non-default options? ++ self.has_options = options is not None ++ ++ def _SetOptions(self, options, options_class_name): ++ """Sets the descriptor's options ++ ++ This function is used in generated proto2 files to update descriptor ++ options. It must not be used outside proto2. ++ """ ++ self._options = options ++ self._options_class_name = options_class_name ++ ++ # Does this descriptor have non-default options? 
++ self.has_options = options is not None ++ ++ def GetOptions(self): ++ """Retrieves descriptor options. ++ ++ This method returns the options set or creates the default options for the ++ descriptor. ++ """ ++ if self._options: ++ return self._options ++ from google.protobuf import descriptor_pb2 ++ try: ++ options_class = getattr(descriptor_pb2, self._options_class_name) ++ except AttributeError: ++ raise RuntimeError('Unknown options class name %s!' % ++ (self._options_class_name)) ++ self._options = options_class() ++ return self._options ++ ++ ++class _NestedDescriptorBase(DescriptorBase): ++ """Common class for descriptors that can be nested.""" ++ ++ def __init__(self, options, options_class_name, name, full_name, ++ file, containing_type, serialized_start=None, ++ serialized_end=None): ++ """Constructor. ++ ++ Args: ++ options: Protocol message options or None ++ to use default message options. ++ options_class_name: (str) The class name of the above options. ++ ++ name: (str) Name of this protocol message type. ++ full_name: (str) Fully-qualified name of this protocol message type, ++ which will include protocol "package" name and the name of any ++ enclosing types. ++ file: (FileDescriptor) Reference to file info. ++ containing_type: if provided, this is a nested descriptor, with this ++ descriptor as parent, otherwise None. ++ serialized_start: The start index (inclusive) in block in the ++ file.serialized_pb that describes this descriptor. ++ serialized_end: The end index (exclusive) in block in the ++ file.serialized_pb that describes this descriptor. ++ """ ++ super(_NestedDescriptorBase, self).__init__( ++ options, options_class_name) ++ ++ self.name = name ++ # TODO(falk): Add function to calculate full_name instead of having it in ++ # memory? 
++ self.full_name = full_name ++ self.file = file ++ self.containing_type = containing_type ++ ++ self._serialized_start = serialized_start ++ self._serialized_end = serialized_end ++ ++ def GetTopLevelContainingType(self): ++ """Returns the root if this is a nested type, or itself if its the root.""" ++ desc = self ++ while desc.containing_type is not None: ++ desc = desc.containing_type ++ return desc ++ ++ def CopyToProto(self, proto): ++ """Copies this to the matching proto in descriptor_pb2. ++ ++ Args: ++ proto: An empty proto instance from descriptor_pb2. ++ ++ Raises: ++ Error: If self couldnt be serialized, due to to few constructor arguments. ++ """ ++ if (self.file is not None and ++ self._serialized_start is not None and ++ self._serialized_end is not None): ++ proto.ParseFromString(self.file.serialized_pb[ ++ self._serialized_start:self._serialized_end]) ++ else: ++ raise Error('Descriptor does not contain serialization.') ++ ++ ++class Descriptor(_NestedDescriptorBase): ++ ++ """Descriptor for a protocol message type. ++ ++ A Descriptor instance has the following attributes: ++ ++ name: (str) Name of this protocol message type. ++ full_name: (str) Fully-qualified name of this protocol message type, ++ which will include protocol "package" name and the name of any ++ enclosing types. ++ ++ containing_type: (Descriptor) Reference to the descriptor of the ++ type containing us, or None if this is top-level. ++ ++ fields: (list of FieldDescriptors) Field descriptors for all ++ fields in this type. ++ fields_by_number: (dict int -> FieldDescriptor) Same FieldDescriptor ++ objects as in |fields|, but indexed by "number" attribute in each ++ FieldDescriptor. ++ fields_by_name: (dict str -> FieldDescriptor) Same FieldDescriptor ++ objects as in |fields|, but indexed by "name" attribute in each ++ FieldDescriptor. ++ ++ nested_types: (list of Descriptors) Descriptor references ++ for all protocol message types nested within this one. 
++ nested_types_by_name: (dict str -> Descriptor) Same Descriptor ++ objects as in |nested_types|, but indexed by "name" attribute ++ in each Descriptor. ++ ++ enum_types: (list of EnumDescriptors) EnumDescriptor references ++ for all enums contained within this type. ++ enum_types_by_name: (dict str ->EnumDescriptor) Same EnumDescriptor ++ objects as in |enum_types|, but indexed by "name" attribute ++ in each EnumDescriptor. ++ enum_values_by_name: (dict str -> EnumValueDescriptor) Dict mapping ++ from enum value name to EnumValueDescriptor for that value. ++ ++ extensions: (list of FieldDescriptor) All extensions defined directly ++ within this message type (NOT within a nested type). ++ extensions_by_name: (dict, string -> FieldDescriptor) Same FieldDescriptor ++ objects as |extensions|, but indexed by "name" attribute of each ++ FieldDescriptor. ++ ++ is_extendable: Does this type define any extension ranges? ++ ++ options: (descriptor_pb2.MessageOptions) Protocol message options or None ++ to use default message options. ++ ++ oneofs: (list of OneofDescriptor) The list of descriptors for oneof fields ++ in this message. ++ oneofs_by_name: (dict str -> OneofDescriptor) Same objects as in |oneofs|, ++ but indexed by "name" attribute. ++ ++ file: (FileDescriptor) Reference to file descriptor. ++ """ ++ ++ # NOTE(tmarek): The file argument redefining a builtin is nothing we can ++ # fix right now since we don't know how many clients already rely on the ++ # name of the argument. ++ def __init__(self, name, full_name, filename, containing_type, fields, ++ nested_types, enum_types, extensions, options=None, ++ is_extendable=True, extension_ranges=None, oneofs=None, ++ file=None, serialized_start=None, serialized_end=None): # pylint:disable=redefined-builtin ++ """Arguments to __init__() are as described in the description ++ of Descriptor fields above. ++ ++ Note that filename is an obsolete argument, that is not used anymore. 
++ Please use file.name to access this as an attribute. ++ """ ++ super(Descriptor, self).__init__( ++ options, 'MessageOptions', name, full_name, file, ++ containing_type, serialized_start=serialized_start, ++ serialized_end=serialized_end) ++ ++ # We have fields in addition to fields_by_name and fields_by_number, ++ # so that: ++ # 1. Clients can index fields by "order in which they're listed." ++ # 2. Clients can easily iterate over all fields with the terse ++ # syntax: for f in descriptor.fields: ... ++ self.fields = fields ++ for field in self.fields: ++ field.containing_type = self ++ self.fields_by_number = dict((f.number, f) for f in fields) ++ self.fields_by_name = dict((f.name, f) for f in fields) ++ ++ self.nested_types = nested_types ++ for nested_type in nested_types: ++ nested_type.containing_type = self ++ self.nested_types_by_name = dict((t.name, t) for t in nested_types) ++ ++ self.enum_types = enum_types ++ for enum_type in self.enum_types: ++ enum_type.containing_type = self ++ self.enum_types_by_name = dict((t.name, t) for t in enum_types) ++ self.enum_values_by_name = dict( ++ (v.name, v) for t in enum_types for v in t.values) ++ ++ self.extensions = extensions ++ for extension in self.extensions: ++ extension.extension_scope = self ++ self.extensions_by_name = dict((f.name, f) for f in extensions) ++ self.is_extendable = is_extendable ++ self.extension_ranges = extension_ranges ++ self.oneofs = oneofs if oneofs is not None else [] ++ self.oneofs_by_name = dict((o.name, o) for o in self.oneofs) ++ for oneof in self.oneofs: ++ oneof.containing_type = self ++ ++ def EnumValueName(self, enum, value): ++ """Returns the string name of an enum value. ++ ++ This is just a small helper method to simplify a common operation. ++ ++ Args: ++ enum: string name of the Enum. ++ value: int, value of the enum. ++ ++ Returns: ++ string name of the enum value. 
++ ++ Raises: ++ KeyError if either the Enum doesn't exist or the value is not a valid ++ value for the enum. ++ """ ++ return self.enum_types_by_name[enum].values_by_number[value].name ++ ++ def CopyToProto(self, proto): ++ """Copies this to a descriptor_pb2.DescriptorProto. ++ ++ Args: ++ proto: An empty descriptor_pb2.DescriptorProto. ++ """ ++ # This function is overriden to give a better doc comment. ++ super(Descriptor, self).CopyToProto(proto) ++ ++ ++# TODO(robinson): We should have aggressive checking here, ++# for example: ++# * If you specify a repeated field, you should not be allowed ++# to specify a default value. ++# * [Other examples here as needed]. ++# ++# TODO(robinson): for this and other *Descriptor classes, we ++# might also want to lock things down aggressively (e.g., ++# prevent clients from setting the attributes). Having ++# stronger invariants here in general will reduce the number ++# of runtime checks we must do in reflection.py... ++class FieldDescriptor(DescriptorBase): ++ ++ """Descriptor for a single field in a .proto file. ++ ++ A FieldDescriptor instance has the following attributes: ++ ++ name: (str) Name of this field, exactly as it appears in .proto. ++ full_name: (str) Name of this field, including containing scope. This is ++ particularly relevant for extensions. ++ index: (int) Dense, 0-indexed index giving the order that this ++ field textually appears within its message in the .proto file. ++ number: (int) Tag number declared for this field in the .proto file. ++ ++ type: (One of the TYPE_* constants below) Declared type. ++ cpp_type: (One of the CPPTYPE_* constants below) C++ type used to ++ represent this field. ++ ++ label: (One of the LABEL_* constants below) Tells whether this ++ field is optional, required, or repeated. ++ has_default_value: (bool) True if this field has a default value defined, ++ otherwise false. ++ default_value: (Varies) Default value of this field. 
Only ++ meaningful for non-repeated scalar fields. Repeated fields ++ should always set this to [], and non-repeated composite ++ fields should always set this to None. ++ ++ containing_type: (Descriptor) Descriptor of the protocol message ++ type that contains this field. Set by the Descriptor constructor ++ if we're passed into one. ++ Somewhat confusingly, for extension fields, this is the ++ descriptor of the EXTENDED message, not the descriptor ++ of the message containing this field. (See is_extension and ++ extension_scope below). ++ message_type: (Descriptor) If a composite field, a descriptor ++ of the message type contained in this field. Otherwise, this is None. ++ enum_type: (EnumDescriptor) If this field contains an enum, a ++ descriptor of that enum. Otherwise, this is None. ++ ++ is_extension: True iff this describes an extension field. ++ extension_scope: (Descriptor) Only meaningful if is_extension is True. ++ Gives the message that immediately contains this extension field. ++ Will be None iff we're a top-level (file-level) extension field. ++ ++ options: (descriptor_pb2.FieldOptions) Protocol message field options or ++ None to use default field options. ++ ++ containing_oneof: (OneofDescriptor) If the field is a member of a oneof ++ union, contains its descriptor. Otherwise, None. ++ """ ++ ++ # Must be consistent with C++ FieldDescriptor::Type enum in ++ # descriptor.h. ++ # ++ # TODO(robinson): Find a way to eliminate this repetition. ++ TYPE_DOUBLE = 1 ++ TYPE_FLOAT = 2 ++ TYPE_INT64 = 3 ++ TYPE_UINT64 = 4 ++ TYPE_INT32 = 5 ++ TYPE_FIXED64 = 6 ++ TYPE_FIXED32 = 7 ++ TYPE_BOOL = 8 ++ TYPE_STRING = 9 ++ TYPE_GROUP = 10 ++ TYPE_MESSAGE = 11 ++ TYPE_BYTES = 12 ++ TYPE_UINT32 = 13 ++ TYPE_ENUM = 14 ++ TYPE_SFIXED32 = 15 ++ TYPE_SFIXED64 = 16 ++ TYPE_SINT32 = 17 ++ TYPE_SINT64 = 18 ++ MAX_TYPE = 18 ++ ++ # Must be consistent with C++ FieldDescriptor::CppType enum in ++ # descriptor.h. 
++ # ++ # TODO(robinson): Find a way to eliminate this repetition. ++ CPPTYPE_INT32 = 1 ++ CPPTYPE_INT64 = 2 ++ CPPTYPE_UINT32 = 3 ++ CPPTYPE_UINT64 = 4 ++ CPPTYPE_DOUBLE = 5 ++ CPPTYPE_FLOAT = 6 ++ CPPTYPE_BOOL = 7 ++ CPPTYPE_ENUM = 8 ++ CPPTYPE_STRING = 9 ++ CPPTYPE_MESSAGE = 10 ++ MAX_CPPTYPE = 10 ++ ++ _PYTHON_TO_CPP_PROTO_TYPE_MAP = { ++ TYPE_DOUBLE: CPPTYPE_DOUBLE, ++ TYPE_FLOAT: CPPTYPE_FLOAT, ++ TYPE_ENUM: CPPTYPE_ENUM, ++ TYPE_INT64: CPPTYPE_INT64, ++ TYPE_SINT64: CPPTYPE_INT64, ++ TYPE_SFIXED64: CPPTYPE_INT64, ++ TYPE_UINT64: CPPTYPE_UINT64, ++ TYPE_FIXED64: CPPTYPE_UINT64, ++ TYPE_INT32: CPPTYPE_INT32, ++ TYPE_SFIXED32: CPPTYPE_INT32, ++ TYPE_SINT32: CPPTYPE_INT32, ++ TYPE_UINT32: CPPTYPE_UINT32, ++ TYPE_FIXED32: CPPTYPE_UINT32, ++ TYPE_BYTES: CPPTYPE_STRING, ++ TYPE_STRING: CPPTYPE_STRING, ++ TYPE_BOOL: CPPTYPE_BOOL, ++ TYPE_MESSAGE: CPPTYPE_MESSAGE, ++ TYPE_GROUP: CPPTYPE_MESSAGE ++ } ++ ++ # Must be consistent with C++ FieldDescriptor::Label enum in ++ # descriptor.h. ++ # ++ # TODO(robinson): Find a way to eliminate this repetition. ++ LABEL_OPTIONAL = 1 ++ LABEL_REQUIRED = 2 ++ LABEL_REPEATED = 3 ++ MAX_LABEL = 3 ++ ++ # Must be consistent with C++ constants kMaxNumber, kFirstReservedNumber, ++ # and kLastReservedNumber in descriptor.h ++ MAX_FIELD_NUMBER = (1 << 29) - 1 ++ FIRST_RESERVED_FIELD_NUMBER = 19000 ++ LAST_RESERVED_FIELD_NUMBER = 19999 ++ ++ def __init__(self, name, full_name, index, number, type, cpp_type, label, ++ default_value, message_type, enum_type, containing_type, ++ is_extension, extension_scope, options=None, ++ has_default_value=True, containing_oneof=None): ++ """The arguments are as described in the description of FieldDescriptor ++ attributes above. ++ ++ Note that containing_type may be None, and may be set later if necessary ++ (to deal with circular references between message types, for example). ++ Likewise for extension_scope. 
++ """ ++ super(FieldDescriptor, self).__init__(options, 'FieldOptions') ++ self.name = name ++ self.full_name = full_name ++ self.index = index ++ self.number = number ++ self.type = type ++ self.cpp_type = cpp_type ++ self.label = label ++ self.has_default_value = has_default_value ++ self.default_value = default_value ++ self.containing_type = containing_type ++ self.message_type = message_type ++ self.enum_type = enum_type ++ self.is_extension = is_extension ++ self.extension_scope = extension_scope ++ self.containing_oneof = containing_oneof ++ if api_implementation.Type() == 'cpp': ++ if is_extension: ++ if api_implementation.Version() == 2: ++ # pylint: disable=protected-access ++ self._cdescriptor = ( ++ _message.Message._GetExtensionDescriptor(full_name)) ++ # pylint: enable=protected-access ++ else: ++ self._cdescriptor = cpp_message.GetExtensionDescriptor(full_name) ++ else: ++ if api_implementation.Version() == 2: ++ # pylint: disable=protected-access ++ self._cdescriptor = _message.Message._GetFieldDescriptor(full_name) ++ # pylint: enable=protected-access ++ else: ++ self._cdescriptor = cpp_message.GetFieldDescriptor(full_name) ++ else: ++ self._cdescriptor = None ++ ++ @staticmethod ++ def ProtoTypeToCppProtoType(proto_type): ++ """Converts from a Python proto type to a C++ Proto Type. ++ ++ The Python ProtocolBuffer classes specify both the 'Python' datatype and the ++ 'C++' datatype - and they're not the same. This helper method should ++ translate from one to another. ++ ++ Args: ++ proto_type: the Python proto type (descriptor.FieldDescriptor.TYPE_*) ++ Returns: ++ descriptor.FieldDescriptor.CPPTYPE_*, the C++ type. ++ Raises: ++ TypeTransformationError: when the Python proto type isn't known. 
++ """ ++ try: ++ return FieldDescriptor._PYTHON_TO_CPP_PROTO_TYPE_MAP[proto_type] ++ except KeyError: ++ raise TypeTransformationError('Unknown proto_type: %s' % proto_type) ++ ++ ++class EnumDescriptor(_NestedDescriptorBase): ++ ++ """Descriptor for an enum defined in a .proto file. ++ ++ An EnumDescriptor instance has the following attributes: ++ ++ name: (str) Name of the enum type. ++ full_name: (str) Full name of the type, including package name ++ and any enclosing type(s). ++ ++ values: (list of EnumValueDescriptors) List of the values ++ in this enum. ++ values_by_name: (dict str -> EnumValueDescriptor) Same as |values|, ++ but indexed by the "name" field of each EnumValueDescriptor. ++ values_by_number: (dict int -> EnumValueDescriptor) Same as |values|, ++ but indexed by the "number" field of each EnumValueDescriptor. ++ containing_type: (Descriptor) Descriptor of the immediate containing ++ type of this enum, or None if this is an enum defined at the ++ top level in a .proto file. Set by Descriptor's constructor ++ if we're passed into one. ++ file: (FileDescriptor) Reference to file descriptor. ++ options: (descriptor_pb2.EnumOptions) Enum options message or ++ None to use default enum options. ++ """ ++ ++ def __init__(self, name, full_name, filename, values, ++ containing_type=None, options=None, file=None, ++ serialized_start=None, serialized_end=None): ++ """Arguments are as described in the attribute description above. ++ ++ Note that filename is an obsolete argument, that is not used anymore. ++ Please use file.name to access this as an attribute. 
++ """ ++ super(EnumDescriptor, self).__init__( ++ options, 'EnumOptions', name, full_name, file, ++ containing_type, serialized_start=serialized_start, ++ serialized_end=serialized_end) ++ ++ self.values = values ++ for value in self.values: ++ value.type = self ++ self.values_by_name = dict((v.name, v) for v in values) ++ self.values_by_number = dict((v.number, v) for v in values) ++ ++ def CopyToProto(self, proto): ++ """Copies this to a descriptor_pb2.EnumDescriptorProto. ++ ++ Args: ++ proto: An empty descriptor_pb2.EnumDescriptorProto. ++ """ ++ # This function is overriden to give a better doc comment. ++ super(EnumDescriptor, self).CopyToProto(proto) ++ ++ ++class EnumValueDescriptor(DescriptorBase): ++ ++ """Descriptor for a single value within an enum. ++ ++ name: (str) Name of this value. ++ index: (int) Dense, 0-indexed index giving the order that this ++ value appears textually within its enum in the .proto file. ++ number: (int) Actual number assigned to this enum value. ++ type: (EnumDescriptor) EnumDescriptor to which this value ++ belongs. Set by EnumDescriptor's constructor if we're ++ passed into one. ++ options: (descriptor_pb2.EnumValueOptions) Enum value options message or ++ None to use default enum value options options. ++ """ ++ ++ def __init__(self, name, index, number, type=None, options=None): ++ """Arguments are as described in the attribute description above.""" ++ super(EnumValueDescriptor, self).__init__(options, 'EnumValueOptions') ++ self.name = name ++ self.index = index ++ self.number = number ++ self.type = type ++ ++ ++class OneofDescriptor(object): ++ """Descriptor for a oneof field. ++ ++ name: (str) Name of the oneof field. ++ full_name: (str) Full name of the oneof field, including package name. ++ index: (int) 0-based index giving the order of the oneof field inside ++ its containing type. ++ containing_type: (Descriptor) Descriptor of the protocol message ++ type that contains this field. 
Set by the Descriptor constructor ++ if we're passed into one. ++ fields: (list of FieldDescriptor) The list of field descriptors this ++ oneof can contain. ++ """ ++ ++ def __init__(self, name, full_name, index, containing_type, fields): ++ """Arguments are as described in the attribute description above.""" ++ self.name = name ++ self.full_name = full_name ++ self.index = index ++ self.containing_type = containing_type ++ self.fields = fields ++ ++ ++class ServiceDescriptor(_NestedDescriptorBase): ++ ++ """Descriptor for a service. ++ ++ name: (str) Name of the service. ++ full_name: (str) Full name of the service, including package name. ++ index: (int) 0-indexed index giving the order that this services ++ definition appears withing the .proto file. ++ methods: (list of MethodDescriptor) List of methods provided by this ++ service. ++ options: (descriptor_pb2.ServiceOptions) Service options message or ++ None to use default service options. ++ file: (FileDescriptor) Reference to file info. ++ """ ++ ++ def __init__(self, name, full_name, index, methods, options=None, file=None, ++ serialized_start=None, serialized_end=None): ++ super(ServiceDescriptor, self).__init__( ++ options, 'ServiceOptions', name, full_name, file, ++ None, serialized_start=serialized_start, ++ serialized_end=serialized_end) ++ self.index = index ++ self.methods = methods ++ # Set the containing service for each method in this service. ++ for method in self.methods: ++ method.containing_service = self ++ ++ def FindMethodByName(self, name): ++ """Searches for the specified method, and returns its descriptor.""" ++ for method in self.methods: ++ if name == method.name: ++ return method ++ return None ++ ++ def CopyToProto(self, proto): ++ """Copies this to a descriptor_pb2.ServiceDescriptorProto. ++ ++ Args: ++ proto: An empty descriptor_pb2.ServiceDescriptorProto. ++ """ ++ # This function is overriden to give a better doc comment. 
++ super(ServiceDescriptor, self).CopyToProto(proto) ++ ++ ++class MethodDescriptor(DescriptorBase): ++ ++ """Descriptor for a method in a service. ++ ++ name: (str) Name of the method within the service. ++ full_name: (str) Full name of method. ++ index: (int) 0-indexed index of the method inside the service. ++ containing_service: (ServiceDescriptor) The service that contains this ++ method. ++ input_type: The descriptor of the message that this method accepts. ++ output_type: The descriptor of the message that this method returns. ++ options: (descriptor_pb2.MethodOptions) Method options message or ++ None to use default method options. ++ """ ++ ++ def __init__(self, name, full_name, index, containing_service, ++ input_type, output_type, options=None): ++ """The arguments are as described in the description of MethodDescriptor ++ attributes above. ++ ++ Note that containing_service may be None, and may be set later if necessary. ++ """ ++ super(MethodDescriptor, self).__init__(options, 'MethodOptions') ++ self.name = name ++ self.full_name = full_name ++ self.index = index ++ self.containing_service = containing_service ++ self.input_type = input_type ++ self.output_type = output_type ++ ++ ++class FileDescriptor(DescriptorBase): ++ """Descriptor for a file. Mimics the descriptor_pb2.FileDescriptorProto. ++ ++ Note that enum_types_by_name, extensions_by_name, and dependencies ++ fields are only set by the message_factory module, and not by the ++ generated proto code. ++ ++ name: name of file, relative to root of source tree. ++ package: name of the package ++ serialized_pb: (str) Byte string of serialized ++ descriptor_pb2.FileDescriptorProto. ++ dependencies: List of other FileDescriptors this FileDescriptor depends on. ++ message_types_by_name: Dict of message names of their descriptors. ++ enum_types_by_name: Dict of enum names and their descriptors. ++ extensions_by_name: Dict of extension names and their descriptors. 
++ """ ++ ++ def __init__(self, name, package, options=None, serialized_pb=None, ++ dependencies=None): ++ """Constructor.""" ++ super(FileDescriptor, self).__init__(options, 'FileOptions') ++ ++ self.message_types_by_name = {} ++ self.name = name ++ self.package = package ++ self.serialized_pb = serialized_pb ++ ++ self.enum_types_by_name = {} ++ self.extensions_by_name = {} ++ self.dependencies = (dependencies or []) ++ ++ if (api_implementation.Type() == 'cpp' and ++ self.serialized_pb is not None): ++ if api_implementation.Version() == 2: ++ # pylint: disable=protected-access ++ _message.Message._BuildFile(self.serialized_pb) ++ # pylint: enable=protected-access ++ else: ++ cpp_message.BuildFile(self.serialized_pb) ++ ++ def CopyToProto(self, proto): ++ """Copies this to a descriptor_pb2.FileDescriptorProto. ++ ++ Args: ++ proto: An empty descriptor_pb2.FileDescriptorProto. ++ """ ++ proto.ParseFromString(self.serialized_pb) ++ ++ ++def _ParseOptions(message, string): ++ """Parses serialized options. ++ ++ This helper function is used to parse serialized options in generated ++ proto2 files. It must not be used outside proto2. ++ """ ++ message.ParseFromString(string) ++ return message ++ ++ ++def MakeDescriptor(desc_proto, package='', build_file_if_cpp=True): ++ """Make a protobuf Descriptor given a DescriptorProto protobuf. ++ ++ Handles nested descriptors. Note that this is limited to the scope of defining ++ a message inside of another message. Composite fields can currently only be ++ resolved if the message is defined in the same scope as the field. ++ ++ Args: ++ desc_proto: The descriptor_pb2.DescriptorProto protobuf message. ++ package: Optional package name for the new message Descriptor (string). ++ build_file_if_cpp: Update the C++ descriptor pool if api matches. ++ Set to False on recursion, so no duplicates are created. ++ Returns: ++ A Descriptor for protobuf messages. 
++ """ ++ if api_implementation.Type() == 'cpp' and build_file_if_cpp: ++ # The C++ implementation requires all descriptors to be backed by the same ++ # definition in the C++ descriptor pool. To do this, we build a ++ # FileDescriptorProto with the same definition as this descriptor and build ++ # it into the pool. ++ from google.protobuf import descriptor_pb2 ++ file_descriptor_proto = descriptor_pb2.FileDescriptorProto() ++ file_descriptor_proto.message_type.add().MergeFrom(desc_proto) ++ ++ # Generate a random name for this proto file to prevent conflicts with ++ # any imported ones. We need to specify a file name so BuildFile accepts ++ # our FileDescriptorProto, but it is not important what that file name ++ # is actually set to. ++ proto_name = str(uuid.uuid4()) ++ ++ if package: ++ file_descriptor_proto.name = os.path.join(package.replace('.', '/'), ++ proto_name + '.proto') ++ file_descriptor_proto.package = package ++ else: ++ file_descriptor_proto.name = proto_name + '.proto' ++ ++ if api_implementation.Version() == 2: ++ # pylint: disable=protected-access ++ _message.Message._BuildFile(file_descriptor_proto.SerializeToString()) ++ # pylint: enable=protected-access ++ else: ++ cpp_message.BuildFile(file_descriptor_proto.SerializeToString()) ++ ++ full_message_name = [desc_proto.name] ++ if package: full_message_name.insert(0, package) ++ ++ # Create Descriptors for enum types ++ enum_types = {} ++ for enum_proto in desc_proto.enum_type: ++ full_name = '.'.join(full_message_name + [enum_proto.name]) ++ enum_desc = EnumDescriptor( ++ enum_proto.name, full_name, None, [ ++ EnumValueDescriptor(enum_val.name, ii, enum_val.number) ++ for ii, enum_val in enumerate(enum_proto.value)]) ++ enum_types[full_name] = enum_desc ++ ++ # Create Descriptors for nested types ++ nested_types = {} ++ for nested_proto in desc_proto.nested_type: ++ full_name = '.'.join(full_message_name + [nested_proto.name]) ++ # Nested types are just those defined inside of the message, not 
all types ++ # used by fields in the message, so no loops are possible here. ++ nested_desc = MakeDescriptor(nested_proto, ++ package='.'.join(full_message_name), ++ build_file_if_cpp=False) ++ nested_types[full_name] = nested_desc ++ ++ fields = [] ++ for field_proto in desc_proto.field: ++ full_name = '.'.join(full_message_name + [field_proto.name]) ++ enum_desc = None ++ nested_desc = None ++ if field_proto.HasField('type_name'): ++ type_name = field_proto.type_name ++ full_type_name = '.'.join(full_message_name + ++ [type_name[type_name.rfind('.')+1:]]) ++ if full_type_name in nested_types: ++ nested_desc = nested_types[full_type_name] ++ elif full_type_name in enum_types: ++ enum_desc = enum_types[full_type_name] ++ # Else type_name references a non-local type, which isn't implemented ++ field = FieldDescriptor( ++ field_proto.name, full_name, field_proto.number - 1, ++ field_proto.number, field_proto.type, ++ FieldDescriptor.ProtoTypeToCppProtoType(field_proto.type), ++ field_proto.label, None, nested_desc, enum_desc, None, False, None, ++ has_default_value=False) ++ fields.append(field) ++ ++ desc_name = '.'.join(full_message_name) ++ return Descriptor(desc_proto.name, desc_name, None, None, fields, ++ list(nested_types.values()), list(enum_types.values()), []) +--- /dev/null ++++ protobuf-2.6.1/python3/google/protobuf/descriptor_database.py +@@ -0,0 +1,137 @@ ++# Protocol Buffers - Google's data interchange format ++# Copyright 2008 Google Inc. All rights reserved. ++# https://developers.google.com/protocol-buffers/ ++# ++# Redistribution and use in source and binary forms, with or without ++# modification, are permitted provided that the following conditions are ++# met: ++# ++# * Redistributions of source code must retain the above copyright ++# notice, this list of conditions and the following disclaimer. 
++# * Redistributions in binary form must reproduce the above ++# copyright notice, this list of conditions and the following disclaimer ++# in the documentation and/or other materials provided with the ++# distribution. ++# * Neither the name of Google Inc. nor the names of its ++# contributors may be used to endorse or promote products derived from ++# this software without specific prior written permission. ++# ++# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS ++# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT ++# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR ++# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT ++# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, ++# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT ++# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, ++# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY ++# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT ++# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE ++# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. ++ ++"""Provides a container for DescriptorProtos.""" ++ ++__author__ = 'matthewtoia@google.com (Matt Toia)' ++ ++ ++class Error(Exception): ++ pass ++ ++ ++class DescriptorDatabaseConflictingDefinitionError(Error): ++ """Raised when a proto is added with the same name & different descriptor.""" ++ ++ ++class DescriptorDatabase(object): ++ """A container accepting FileDescriptorProtos and maps DescriptorProtos.""" ++ ++ def __init__(self): ++ self._file_desc_protos_by_file = {} ++ self._file_desc_protos_by_symbol = {} ++ ++ def Add(self, file_desc_proto): ++ """Adds the FileDescriptorProto and its types to this database. ++ ++ Args: ++ file_desc_proto: The FileDescriptorProto to add. 
++ Raises: ++ DescriptorDatabaseException: if an attempt is made to add a proto ++ with the same name but different definition than an exisiting ++ proto in the database. ++ """ ++ proto_name = file_desc_proto.name ++ if proto_name not in self._file_desc_protos_by_file: ++ self._file_desc_protos_by_file[proto_name] = file_desc_proto ++ elif self._file_desc_protos_by_file[proto_name] != file_desc_proto: ++ raise DescriptorDatabaseConflictingDefinitionError( ++ '%s already added, but with different descriptor.' % proto_name) ++ ++ package = file_desc_proto.package ++ for message in file_desc_proto.message_type: ++ self._file_desc_protos_by_symbol.update( ++ (name, file_desc_proto) for name in _ExtractSymbols(message, package)) ++ for enum in file_desc_proto.enum_type: ++ self._file_desc_protos_by_symbol[ ++ '.'.join((package, enum.name))] = file_desc_proto ++ ++ def FindFileByName(self, name): ++ """Finds the file descriptor proto by file name. ++ ++ Typically the file name is a relative path ending to a .proto file. The ++ proto with the given name will have to have been added to this database ++ using the Add method or else an error will be raised. ++ ++ Args: ++ name: The file name to find. ++ ++ Returns: ++ The file descriptor proto matching the name. ++ ++ Raises: ++ KeyError if no file by the given name was added. ++ """ ++ ++ return self._file_desc_protos_by_file[name] ++ ++ def FindFileContainingSymbol(self, symbol): ++ """Finds the file descriptor proto containing the specified symbol. ++ ++ The symbol should be a fully qualified name including the file descriptor's ++ package and any containing messages. Some examples: ++ ++ 'some.package.name.Message' ++ 'some.package.name.Message.NestedEnum' ++ ++ The file descriptor proto containing the specified symbol must be added to ++ this database using the Add method or else an error will be raised. ++ ++ Args: ++ symbol: The fully qualified symbol name. 
++ ++ Returns: ++ The file descriptor proto containing the symbol. ++ ++ Raises: ++ KeyError if no file contains the specified symbol. ++ """ ++ ++ return self._file_desc_protos_by_symbol[symbol] ++ ++ ++def _ExtractSymbols(desc_proto, package): ++ """Pulls out all the symbols from a descriptor proto. ++ ++ Args: ++ desc_proto: The proto to extract symbols from. ++ package: The package containing the descriptor type. ++ ++ Yields: ++ The fully qualified name found in the descriptor. ++ """ ++ ++ message_name = '.'.join((package, desc_proto.name)) ++ yield message_name ++ for nested_type in desc_proto.nested_type: ++ for symbol in _ExtractSymbols(nested_type, message_name): ++ yield symbol ++ for enum_type in desc_proto.enum_type: ++ yield '.'.join((message_name, enum_type.name)) +--- /dev/null ++++ protobuf-2.6.1/python3/google/protobuf/descriptor_pool.py +@@ -0,0 +1,639 @@ ++# Protocol Buffers - Google's data interchange format ++# Copyright 2008 Google Inc. All rights reserved. ++# https://developers.google.com/protocol-buffers/ ++# ++# Redistribution and use in source and binary forms, with or without ++# modification, are permitted provided that the following conditions are ++# met: ++# ++# * Redistributions of source code must retain the above copyright ++# notice, this list of conditions and the following disclaimer. ++# * Redistributions in binary form must reproduce the above ++# copyright notice, this list of conditions and the following disclaimer ++# in the documentation and/or other materials provided with the ++# distribution. ++# * Neither the name of Google Inc. nor the names of its ++# contributors may be used to endorse or promote products derived from ++# this software without specific prior written permission. 
++# ++# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS ++# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT ++# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR ++# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT ++# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, ++# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT ++# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, ++# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY ++# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT ++# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE ++# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. ++ ++"""Provides DescriptorPool to use as a container for proto2 descriptors. ++ ++The DescriptorPool is used in conjection with a DescriptorDatabase to maintain ++a collection of protocol buffer descriptors for use when dynamically creating ++message types at runtime. ++ ++For most applications protocol buffers should be used via modules generated by ++the protocol buffer compiler tool. This should only be used when the type of ++protocol buffers used in an application or library cannot be predetermined. ++ ++Below is a straightforward example on how to use this class: ++ ++ pool = DescriptorPool() ++ file_descriptor_protos = [ ... ] ++ for file_descriptor_proto in file_descriptor_protos: ++ pool.Add(file_descriptor_proto) ++ my_message_descriptor = pool.FindMessageTypeByName('some.package.MessageType') ++ ++The message descriptor can be used in conjunction with the message_factory ++module in order to create a protocol buffer class that can be encoded and ++decoded. ++ ++If you want to get a Python class for the specified proto, use the ++helper functions inside google.protobuf.message_factory ++directly instead of this class. 
++""" ++ ++__author__ = 'matthewtoia@google.com (Matt Toia)' ++ ++from google.protobuf import descriptor ++from google.protobuf import descriptor_database ++from google.protobuf import text_encoding ++ ++ ++def _NormalizeFullyQualifiedName(name): ++ """Remove leading period from fully-qualified type name. ++ ++ Due to b/13860351 in descriptor_database.py, types in the root namespace are ++ generated with a leading period. This function removes that prefix. ++ ++ Args: ++ name: A str, the fully-qualified symbol name. ++ ++ Returns: ++ A str, the normalized fully-qualified symbol name. ++ """ ++ return name.lstrip('.') ++ ++ ++class DescriptorPool(object): ++ """A collection of protobufs dynamically constructed by descriptor protos.""" ++ ++ def __init__(self, descriptor_db=None): ++ """Initializes a Pool of proto buffs. ++ ++ The descriptor_db argument to the constructor is provided to allow ++ specialized file descriptor proto lookup code to be triggered on demand. An ++ example would be an implementation which will read and compile a file ++ specified in a call to FindFileByName() and not require the call to Add() ++ at all. Results from this database will be cached internally here as well. ++ ++ Args: ++ descriptor_db: A secondary source of file descriptors. ++ """ ++ ++ self._internal_db = descriptor_database.DescriptorDatabase() ++ self._descriptor_db = descriptor_db ++ self._descriptors = {} ++ self._enum_descriptors = {} ++ self._file_descriptors = {} ++ ++ def Add(self, file_desc_proto): ++ """Adds the FileDescriptorProto and its types to this pool. ++ ++ Args: ++ file_desc_proto: The FileDescriptorProto to add. ++ """ ++ ++ self._internal_db.Add(file_desc_proto) ++ ++ def AddDescriptor(self, desc): ++ """Adds a Descriptor to the pool, non-recursively. ++ ++ If the Descriptor contains nested messages or enums, the caller must ++ explicitly register them. This method also registers the FileDescriptor ++ associated with the message. 
++ ++ Args: ++ desc: A Descriptor. ++ """ ++ if not isinstance(desc, descriptor.Descriptor): ++ raise TypeError('Expected instance of descriptor.Descriptor.') ++ ++ self._descriptors[desc.full_name] = desc ++ self.AddFileDescriptor(desc.file) ++ ++ def AddEnumDescriptor(self, enum_desc): ++ """Adds an EnumDescriptor to the pool. ++ ++ This method also registers the FileDescriptor associated with the message. ++ ++ Args: ++ enum_desc: An EnumDescriptor. ++ """ ++ ++ if not isinstance(enum_desc, descriptor.EnumDescriptor): ++ raise TypeError('Expected instance of descriptor.EnumDescriptor.') ++ ++ self._enum_descriptors[enum_desc.full_name] = enum_desc ++ self.AddFileDescriptor(enum_desc.file) ++ ++ def AddFileDescriptor(self, file_desc): ++ """Adds a FileDescriptor to the pool, non-recursively. ++ ++ If the FileDescriptor contains messages or enums, the caller must explicitly ++ register them. ++ ++ Args: ++ file_desc: A FileDescriptor. ++ """ ++ ++ if not isinstance(file_desc, descriptor.FileDescriptor): ++ raise TypeError('Expected instance of descriptor.FileDescriptor.') ++ self._file_descriptors[file_desc.name] = file_desc ++ ++ def FindFileByName(self, file_name): ++ """Gets a FileDescriptor by file name. ++ ++ Args: ++ file_name: The path to the file to get a descriptor for. ++ ++ Returns: ++ A FileDescriptor for the named file. ++ ++ Raises: ++ KeyError: if the file can not be found in the pool. ++ """ ++ ++ try: ++ return self._file_descriptors[file_name] ++ except KeyError: ++ pass ++ ++ try: ++ file_proto = self._internal_db.FindFileByName(file_name) ++ except KeyError as error: ++ if self._descriptor_db: ++ file_proto = self._descriptor_db.FindFileByName(file_name) ++ else: ++ raise error ++ if not file_proto: ++ raise KeyError('Cannot find a file named %s' % file_name) ++ return self._ConvertFileProtoToFileDescriptor(file_proto) ++ ++ def FindFileContainingSymbol(self, symbol): ++ """Gets the FileDescriptor for the file containing the specified symbol. 
++ ++ Args: ++ symbol: The name of the symbol to search for. ++ ++ Returns: ++ A FileDescriptor that contains the specified symbol. ++ ++ Raises: ++ KeyError: if the file can not be found in the pool. ++ """ ++ ++ symbol = _NormalizeFullyQualifiedName(symbol) ++ try: ++ return self._descriptors[symbol].file ++ except KeyError: ++ pass ++ ++ try: ++ return self._enum_descriptors[symbol].file ++ except KeyError: ++ pass ++ ++ try: ++ file_proto = self._internal_db.FindFileContainingSymbol(symbol) ++ except KeyError as error: ++ if self._descriptor_db: ++ file_proto = self._descriptor_db.FindFileContainingSymbol(symbol) ++ else: ++ raise error ++ if not file_proto: ++ raise KeyError('Cannot find a file containing %s' % symbol) ++ return self._ConvertFileProtoToFileDescriptor(file_proto) ++ ++ def FindMessageTypeByName(self, full_name): ++ """Loads the named descriptor from the pool. ++ ++ Args: ++ full_name: The full name of the descriptor to load. ++ ++ Returns: ++ The descriptor for the named type. ++ """ ++ ++ full_name = _NormalizeFullyQualifiedName(full_name) ++ if full_name not in self._descriptors: ++ self.FindFileContainingSymbol(full_name) ++ return self._descriptors[full_name] ++ ++ def FindEnumTypeByName(self, full_name): ++ """Loads the named enum descriptor from the pool. ++ ++ Args: ++ full_name: The full name of the enum descriptor to load. ++ ++ Returns: ++ The enum descriptor for the named type. ++ """ ++ ++ full_name = _NormalizeFullyQualifiedName(full_name) ++ if full_name not in self._enum_descriptors: ++ self.FindFileContainingSymbol(full_name) ++ return self._enum_descriptors[full_name] ++ ++ def _ConvertFileProtoToFileDescriptor(self, file_proto): ++ """Creates a FileDescriptor from a proto or returns a cached copy. ++ ++ This method also has the side effect of loading all the symbols found in ++ the file into the appropriate dictionaries in the pool. ++ ++ Args: ++ file_proto: The proto to convert. 
++ ++ Returns: ++ A FileDescriptor matching the passed in proto. ++ """ ++ ++ if file_proto.name not in self._file_descriptors: ++ built_deps = list(self._GetDeps(file_proto.dependency)) ++ direct_deps = [self.FindFileByName(n) for n in file_proto.dependency] ++ ++ file_descriptor = descriptor.FileDescriptor( ++ name=file_proto.name, ++ package=file_proto.package, ++ options=file_proto.options, ++ serialized_pb=file_proto.SerializeToString(), ++ dependencies=direct_deps) ++ scope = {} ++ ++ # This loop extracts all the message and enum types from all the ++ # dependencoes of the file_proto. This is necessary to create the ++ # scope of available message types when defining the passed in ++ # file proto. ++ for dependency in built_deps: ++ scope.update(self._ExtractSymbols( ++ list(dependency.message_types_by_name.values()))) ++ scope.update((_PrefixWithDot(enum.full_name), enum) ++ for enum in list(dependency.enum_types_by_name.values())) ++ ++ for message_type in file_proto.message_type: ++ message_desc = self._ConvertMessageDescriptor( ++ message_type, file_proto.package, file_descriptor, scope) ++ file_descriptor.message_types_by_name[message_desc.name] = message_desc ++ ++ for enum_type in file_proto.enum_type: ++ file_descriptor.enum_types_by_name[enum_type.name] = ( ++ self._ConvertEnumDescriptor(enum_type, file_proto.package, ++ file_descriptor, None, scope)) ++ ++ for index, extension_proto in enumerate(file_proto.extension): ++ extension_desc = self.MakeFieldDescriptor( ++ extension_proto, file_proto.package, index, is_extension=True) ++ extension_desc.containing_type = self._GetTypeFromScope( ++ file_descriptor.package, extension_proto.extendee, scope) ++ self.SetFieldType(extension_proto, extension_desc, ++ file_descriptor.package, scope) ++ file_descriptor.extensions_by_name[extension_desc.name] = extension_desc ++ ++ for desc_proto in file_proto.message_type: ++ self.SetAllFieldTypes(file_proto.package, desc_proto, scope) ++ ++ if file_proto.package: 
++ desc_proto_prefix = _PrefixWithDot(file_proto.package) ++ else: ++ desc_proto_prefix = '' ++ ++ for desc_proto in file_proto.message_type: ++ desc = self._GetTypeFromScope(desc_proto_prefix, desc_proto.name, scope) ++ file_descriptor.message_types_by_name[desc_proto.name] = desc ++ self.Add(file_proto) ++ self._file_descriptors[file_proto.name] = file_descriptor ++ ++ return self._file_descriptors[file_proto.name] ++ ++ def _ConvertMessageDescriptor(self, desc_proto, package=None, file_desc=None, ++ scope=None): ++ """Adds the proto to the pool in the specified package. ++ ++ Args: ++ desc_proto: The descriptor_pb2.DescriptorProto protobuf message. ++ package: The package the proto should be located in. ++ file_desc: The file containing this message. ++ scope: Dict mapping short and full symbols to message and enum types. ++ ++ Returns: ++ The added descriptor. ++ """ ++ ++ if package: ++ desc_name = '.'.join((package, desc_proto.name)) ++ else: ++ desc_name = desc_proto.name ++ ++ if file_desc is None: ++ file_name = None ++ else: ++ file_name = file_desc.name ++ ++ if scope is None: ++ scope = {} ++ ++ nested = [ ++ self._ConvertMessageDescriptor(nested, desc_name, file_desc, scope) ++ for nested in desc_proto.nested_type] ++ enums = [ ++ self._ConvertEnumDescriptor(enum, desc_name, file_desc, None, scope) ++ for enum in desc_proto.enum_type] ++ fields = [self.MakeFieldDescriptor(field, desc_name, index) ++ for index, field in enumerate(desc_proto.field)] ++ extensions = [ ++ self.MakeFieldDescriptor(extension, desc_name, index, is_extension=True) ++ for index, extension in enumerate(desc_proto.extension)] ++ oneofs = [ ++ descriptor.OneofDescriptor(desc.name, '.'.join((desc_name, desc.name)), ++ index, None, []) ++ for index, desc in enumerate(desc_proto.oneof_decl)] ++ extension_ranges = [(r.start, r.end) for r in desc_proto.extension_range] ++ if extension_ranges: ++ is_extendable = True ++ else: ++ is_extendable = False ++ desc = descriptor.Descriptor( ++ 
name=desc_proto.name, ++ full_name=desc_name, ++ filename=file_name, ++ containing_type=None, ++ fields=fields, ++ oneofs=oneofs, ++ nested_types=nested, ++ enum_types=enums, ++ extensions=extensions, ++ options=desc_proto.options, ++ is_extendable=is_extendable, ++ extension_ranges=extension_ranges, ++ file=file_desc, ++ serialized_start=None, ++ serialized_end=None) ++ for nested in desc.nested_types: ++ nested.containing_type = desc ++ for enum in desc.enum_types: ++ enum.containing_type = desc ++ for field_index, field_desc in enumerate(desc_proto.field): ++ if field_desc.HasField('oneof_index'): ++ oneof_index = field_desc.oneof_index ++ oneofs[oneof_index].fields.append(fields[field_index]) ++ fields[field_index].containing_oneof = oneofs[oneof_index] ++ ++ scope[_PrefixWithDot(desc_name)] = desc ++ self._descriptors[desc_name] = desc ++ return desc ++ ++ def _ConvertEnumDescriptor(self, enum_proto, package=None, file_desc=None, ++ containing_type=None, scope=None): ++ """Make a protobuf EnumDescriptor given an EnumDescriptorProto protobuf. ++ ++ Args: ++ enum_proto: The descriptor_pb2.EnumDescriptorProto protobuf message. ++ package: Optional package name for the new message EnumDescriptor. ++ file_desc: The file containing the enum descriptor. ++ containing_type: The type containing this enum. ++ scope: Scope containing available types. 
++ ++ Returns: ++ The added descriptor ++ """ ++ ++ if package: ++ enum_name = '.'.join((package, enum_proto.name)) ++ else: ++ enum_name = enum_proto.name ++ ++ if file_desc is None: ++ file_name = None ++ else: ++ file_name = file_desc.name ++ ++ values = [self._MakeEnumValueDescriptor(value, index) ++ for index, value in enumerate(enum_proto.value)] ++ desc = descriptor.EnumDescriptor(name=enum_proto.name, ++ full_name=enum_name, ++ filename=file_name, ++ file=file_desc, ++ values=values, ++ containing_type=containing_type, ++ options=enum_proto.options) ++ scope['.%s' % enum_name] = desc ++ self._enum_descriptors[enum_name] = desc ++ return desc ++ ++ def MakeFieldDescriptor(self, field_proto, message_name, index, ++ is_extension=False): ++ """Creates a field descriptor from a FieldDescriptorProto. ++ ++ For message and enum type fields, this method will do a look up ++ in the pool for the appropriate descriptor for that type. If it ++ is unavailable, it will fall back to the _source function to ++ create it. If this type is still unavailable, construction will ++ fail. ++ ++ Args: ++ field_proto: The proto describing the field. ++ message_name: The name of the containing message. ++ index: Index of the field ++ is_extension: Indication that this field is for an extension. 
++ ++ Returns: ++ An initialized FieldDescriptor object ++ """ ++ ++ if message_name: ++ full_name = '.'.join((message_name, field_proto.name)) ++ else: ++ full_name = field_proto.name ++ ++ return descriptor.FieldDescriptor( ++ name=field_proto.name, ++ full_name=full_name, ++ index=index, ++ number=field_proto.number, ++ type=field_proto.type, ++ cpp_type=None, ++ message_type=None, ++ enum_type=None, ++ containing_type=None, ++ label=field_proto.label, ++ has_default_value=False, ++ default_value=None, ++ is_extension=is_extension, ++ extension_scope=None, ++ options=field_proto.options) ++ ++ def SetAllFieldTypes(self, package, desc_proto, scope): ++ """Sets all the descriptor's fields' types. ++ ++ This method also sets the containing types on any extensions. ++ ++ Args: ++ package: The current package of desc_proto. ++ desc_proto: The message descriptor to update. ++ scope: Enclosing scope of available types. ++ """ ++ ++ package = _PrefixWithDot(package) ++ ++ main_desc = self._GetTypeFromScope(package, desc_proto.name, scope) ++ ++ if package == '.': ++ nested_package = _PrefixWithDot(desc_proto.name) ++ else: ++ nested_package = '.'.join([package, desc_proto.name]) ++ ++ for field_proto, field_desc in zip(desc_proto.field, main_desc.fields): ++ self.SetFieldType(field_proto, field_desc, nested_package, scope) ++ ++ for extension_proto, extension_desc in ( ++ zip(desc_proto.extension, main_desc.extensions)): ++ extension_desc.containing_type = self._GetTypeFromScope( ++ nested_package, extension_proto.extendee, scope) ++ self.SetFieldType(extension_proto, extension_desc, nested_package, scope) ++ ++ for nested_type in desc_proto.nested_type: ++ self.SetAllFieldTypes(nested_package, nested_type, scope) ++ ++ def SetFieldType(self, field_proto, field_desc, package, scope): ++ """Sets the field's type, cpp_type, message_type and enum_type. ++ ++ Args: ++ field_proto: Data about the field in proto format. ++ field_desc: The descriptor to modify.
++ package: The package the field's container is in. ++ scope: Enclosing scope of available types. ++ """ ++ if field_proto.type_name: ++ desc = self._GetTypeFromScope(package, field_proto.type_name, scope) ++ else: ++ desc = None ++ ++ if not field_proto.HasField('type'): ++ if isinstance(desc, descriptor.Descriptor): ++ field_proto.type = descriptor.FieldDescriptor.TYPE_MESSAGE ++ else: ++ field_proto.type = descriptor.FieldDescriptor.TYPE_ENUM ++ ++ field_desc.cpp_type = descriptor.FieldDescriptor.ProtoTypeToCppProtoType( ++ field_proto.type) ++ ++ if (field_proto.type == descriptor.FieldDescriptor.TYPE_MESSAGE ++ or field_proto.type == descriptor.FieldDescriptor.TYPE_GROUP): ++ field_desc.message_type = desc ++ ++ if field_proto.type == descriptor.FieldDescriptor.TYPE_ENUM: ++ field_desc.enum_type = desc ++ ++ if field_proto.label == descriptor.FieldDescriptor.LABEL_REPEATED: ++ field_desc.has_default_value = False ++ field_desc.default_value = [] ++ elif field_proto.HasField('default_value'): ++ field_desc.has_default_value = True ++ if (field_proto.type == descriptor.FieldDescriptor.TYPE_DOUBLE or ++ field_proto.type == descriptor.FieldDescriptor.TYPE_FLOAT): ++ field_desc.default_value = float(field_proto.default_value) ++ elif field_proto.type == descriptor.FieldDescriptor.TYPE_STRING: ++ field_desc.default_value = field_proto.default_value ++ elif field_proto.type == descriptor.FieldDescriptor.TYPE_BOOL: ++ field_desc.default_value = field_proto.default_value.lower() == 'true' ++ elif field_proto.type == descriptor.FieldDescriptor.TYPE_ENUM: ++ field_desc.default_value = field_desc.enum_type.values_by_name[ ++ field_proto.default_value].index ++ elif field_proto.type == descriptor.FieldDescriptor.TYPE_BYTES: ++ field_desc.default_value = text_encoding.CUnescape( ++ field_proto.default_value) ++ else: ++ field_desc.default_value = int(field_proto.default_value) ++ else: ++ field_desc.has_default_value = False ++ field_desc.default_value = None ++ ++ 
field_desc.type = field_proto.type ++ ++ def _MakeEnumValueDescriptor(self, value_proto, index): ++ """Creates an enum value descriptor object from an enum value proto. ++ ++ Args: ++ value_proto: The proto describing the enum value. ++ index: The index of the enum value. ++ ++ Returns: ++ An initialized EnumValueDescriptor object. ++ """ ++ ++ return descriptor.EnumValueDescriptor( ++ name=value_proto.name, ++ index=index, ++ number=value_proto.number, ++ options=value_proto.options, ++ type=None) ++ ++ def _ExtractSymbols(self, descriptors): ++ """Pulls out all the symbols from descriptor protos. ++ ++ Args: ++ descriptors: The messages to extract descriptors from. ++ Yields: ++ A two element tuple of the type name and descriptor object. ++ """ ++ ++ for desc in descriptors: ++ yield (_PrefixWithDot(desc.full_name), desc) ++ for symbol in self._ExtractSymbols(desc.nested_types): ++ yield symbol ++ for enum in desc.enum_types: ++ yield (_PrefixWithDot(enum.full_name), enum) ++ ++ def _GetDeps(self, dependencies): ++ """Recursively finds dependencies for file protos. ++ ++ Args: ++ dependencies: The names of the files being depended on. ++ ++ Yields: ++ Each direct and indirect dependency. ++ """ ++ ++ for dependency in dependencies: ++ dep_desc = self.FindFileByName(dependency) ++ yield dep_desc ++ for parent_dep in dep_desc.dependencies: ++ yield parent_dep ++ ++ def _GetTypeFromScope(self, package, type_name, scope): ++ """Finds a given type name in the current scope. ++ ++ Args: ++ package: The package the proto should be located in. ++ type_name: The name of the type to be found in the scope. ++ scope: Dict mapping short and full symbols to message and enum types. ++ ++ Returns: ++ The descriptor for the requested type.
++ """ ++ if type_name not in scope: ++ components = _PrefixWithDot(package).split('.') ++ while components: ++ possible_match = '.'.join(components + [type_name]) ++ if possible_match in scope: ++ type_name = possible_match ++ break ++ else: ++ components.pop(-1) ++ return scope[type_name] ++ ++ ++def _PrefixWithDot(name): ++ return name if name.startswith('.') else '.%s' % name +--- /dev/null ++++ protobuf-2.6.1/python3/google/protobuf/internal/api_implementation.cc +@@ -0,0 +1,139 @@ ++// Protocol Buffers - Google's data interchange format ++// Copyright 2008 Google Inc. All rights reserved. ++// https://developers.google.com/protocol-buffers/ ++// ++// Redistribution and use in source and binary forms, with or without ++// modification, are permitted provided that the following conditions are ++// met: ++// ++// * Redistributions of source code must retain the above copyright ++// notice, this list of conditions and the following disclaimer. ++// * Redistributions in binary form must reproduce the above ++// copyright notice, this list of conditions and the following disclaimer ++// in the documentation and/or other materials provided with the ++// distribution. ++// * Neither the name of Google Inc. nor the names of its ++// contributors may be used to endorse or promote products derived from ++// this software without specific prior written permission. ++// ++// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS ++// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT ++// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR ++// A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT ++// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, ++// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT ++// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, ++// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY ++// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT ++// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE ++// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. ++ ++#include ++ ++namespace google { ++namespace protobuf { ++namespace python { ++ ++// Version constant. ++// This is either 0 for python, 1 for CPP V1, 2 for CPP V2. ++// ++// 0 is default and is equivalent to ++// PROTOCOL_BUFFERS_PYTHON_IMPLEMENTATION=python ++// ++// 1 is set with -DPYTHON_PROTO2_CPP_IMPL_V1 and is equivalent to ++// PROTOCOL_BUFFERS_PYTHON_IMPLEMENTATION=cpp ++// and ++// PROTOCOL_BUFFERS_PYTHON_IMPLEMENTATION_VERSION=1 ++// ++// 2 is set with -DPYTHON_PROTO2_CPP_IMPL_V2 and is equivalent to ++// PROTOCOL_BUFFERS_PYTHON_IMPLEMENTATION=cpp ++// and ++// PROTOCOL_BUFFERS_PYTHON_IMPLEMENTATION_VERSION=2 ++#ifdef PYTHON_PROTO2_CPP_IMPL_V1 ++#if PY_MAJOR_VERSION >= 3 ++#error "PYTHON_PROTO2_CPP_IMPL_V1 is not supported under Python 3." ++#endif ++static int kImplVersion = 1; ++#else ++#ifdef PYTHON_PROTO2_CPP_IMPL_V2 ++static int kImplVersion = 2; ++#else ++#ifdef PYTHON_PROTO2_PYTHON_IMPL ++static int kImplVersion = 0; ++#else ++ ++// The defaults are set here. Python 3 uses the fast C++ APIv2 by default. ++// Python 2 still uses the Python version by default until some compatibility ++// issues can be worked around. 
++#if PY_MAJOR_VERSION >= 3 ++static int kImplVersion = 2; ++#else ++static int kImplVersion = 0; ++#endif ++ ++#endif // PYTHON_PROTO2_PYTHON_IMPL ++#endif // PYTHON_PROTO2_CPP_IMPL_V2 ++#endif // PYTHON_PROTO2_CPP_IMPL_V1 ++ ++static const char* kImplVersionName = "api_version"; ++ ++static const char* kModuleName = "_api_implementation"; ++static const char kModuleDocstring[] = ++"_api_implementation is a module that exposes compile-time constants that\n" ++"determine the default API implementation to use for Python proto2.\n" ++"\n" ++"It complements api_implementation.py by setting defaults using compile-time\n" ++"constants defined in C, such that one can set defaults at compilation\n" ++"(e.g. with blaze flag --copt=-DPYTHON_PROTO2_CPP_IMPL_V2)."; ++ ++#if PY_MAJOR_VERSION >= 3 ++static struct PyModuleDef _module = { ++ PyModuleDef_HEAD_INIT, ++ kModuleName, ++ kModuleDocstring, ++ -1, ++ NULL, ++ NULL, ++ NULL, ++ NULL, ++ NULL ++}; ++#define INITFUNC PyInit__api_implementation ++#define INITFUNC_ERRORVAL NULL ++#else ++#define INITFUNC init_api_implementation ++#define INITFUNC_ERRORVAL ++#endif ++ ++extern "C" { ++ PyMODINIT_FUNC INITFUNC() { ++#if PY_MAJOR_VERSION >= 3 ++ PyObject *module = PyModule_Create(&_module); ++#else ++ PyObject *module = Py_InitModule3( ++ const_cast(kModuleName), ++ NULL, ++ const_cast(kModuleDocstring)); ++#endif ++ if (module == NULL) { ++ return INITFUNC_ERRORVAL; ++ } ++ ++ // Adds the module variable "api_version". ++ if (PyModule_AddIntConstant( ++ module, ++ const_cast(kImplVersionName), ++ kImplVersion)) ++#if PY_MAJOR_VERSION < 3 ++ return; ++#else ++ { Py_DECREF(module); return NULL; } ++ ++ return module; ++#endif ++ } ++} ++ ++} // namespace python ++} // namespace protobuf ++} // namespace google +--- /dev/null ++++ protobuf-2.6.1/python3/google/protobuf/internal/api_implementation.py +@@ -0,0 +1,89 @@ ++# Protocol Buffers - Google's data interchange format ++# Copyright 2008 Google Inc. All rights reserved. 
++# https://developers.google.com/protocol-buffers/ ++# ++# Redistribution and use in source and binary forms, with or without ++# modification, are permitted provided that the following conditions are ++# met: ++# ++# * Redistributions of source code must retain the above copyright ++# notice, this list of conditions and the following disclaimer. ++# * Redistributions in binary form must reproduce the above ++# copyright notice, this list of conditions and the following disclaimer ++# in the documentation and/or other materials provided with the ++# distribution. ++# * Neither the name of Google Inc. nor the names of its ++# contributors may be used to endorse or promote products derived from ++# this software without specific prior written permission. ++# ++# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS ++# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT ++# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR ++# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT ++# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, ++# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT ++# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, ++# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY ++# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT ++# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE ++# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. ++ ++"""Determine which implementation of the protobuf API is used in this process. ++""" ++ ++import os ++import sys ++ ++try: ++ # pylint: disable=g-import-not-at-top ++ from google.protobuf.internal import _api_implementation ++ # The compile-time constants in the _api_implementation module can be used to ++ # switch to a certain implementation of the Python API at build time. 
++ _api_version = _api_implementation.api_version ++ del _api_implementation ++except ImportError: ++ _api_version = 0 ++ ++_default_implementation_type = ( ++ 'python' if _api_version == 0 else 'cpp') ++_default_version_str = ( ++ '1' if _api_version <= 1 else '2') ++ ++# This environment variable can be used to switch to a certain implementation ++# of the Python API, overriding the compile-time constants in the ++# _api_implementation module. Right now only 'python' and 'cpp' are valid ++# values. Any other value will be ignored. ++_implementation_type = os.getenv('PROTOCOL_BUFFERS_PYTHON_IMPLEMENTATION', ++ _default_implementation_type) ++ ++if _implementation_type != 'python': ++ _implementation_type = 'cpp' ++ ++# This environment variable can be used to switch between the two ++# 'cpp' implementations, overriding the compile-time constants in the ++# _api_implementation module. Right now only 1 and 2 are valid values. Any other ++# value will be ignored. ++_implementation_version_str = os.getenv( ++ 'PROTOCOL_BUFFERS_PYTHON_IMPLEMENTATION_VERSION', ++ _default_version_str) ++ ++if _implementation_version_str not in ('1', '2'): ++ raise ValueError( ++ "unsupported PROTOCOL_BUFFERS_PYTHON_IMPLEMENTATION_VERSION: '" + ++ _implementation_version_str + "' (supported versions: 1, 2)" ++ ) ++ ++_implementation_version = int(_implementation_version_str) ++ ++ ++# Usage of this function is discouraged. Clients shouldn't care which ++# implementation of the API is in use. Note that there is no guarantee ++# that differences between APIs will be maintained. ++# Please don't use this function if possible. ++def Type(): ++ return _implementation_type ++ ++ ++# See comment on 'Type' above. ++def Version(): ++ return _implementation_version +--- /dev/null ++++ protobuf-2.6.1/python3/google/protobuf/internal/api_implementation_default_test.py +@@ -0,0 +1,63 @@ ++#! /usr/bin/python ++# ++# Protocol Buffers - Google's data interchange format ++# Copyright 2008 Google Inc. 
All rights reserved. ++# https://developers.google.com/protocol-buffers/ ++# ++# Redistribution and use in source and binary forms, with or without ++# modification, are permitted provided that the following conditions are ++# met: ++# ++# * Redistributions of source code must retain the above copyright ++# notice, this list of conditions and the following disclaimer. ++# * Redistributions in binary form must reproduce the above ++# copyright notice, this list of conditions and the following disclaimer ++# in the documentation and/or other materials provided with the ++# distribution. ++# * Neither the name of Google Inc. nor the names of its ++# contributors may be used to endorse or promote products derived from ++# this software without specific prior written permission. ++# ++# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS ++# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT ++# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR ++# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT ++# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, ++# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT ++# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, ++# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY ++# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT ++# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE ++# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. ++ ++"""Test that the api_implementation defaults are what we expect.""" ++ ++import os ++import sys ++# Clear environment implementation settings before the google3 imports. 
++os.environ.pop('PROTOCOL_BUFFERS_PYTHON_IMPLEMENTATION', None) ++os.environ.pop('PROTOCOL_BUFFERS_PYTHON_IMPLEMENTATION_VERSION', None) ++ ++# pylint: disable=g-import-not-at-top ++from google.apputils import basetest ++from google.protobuf.internal import api_implementation ++ ++ ++class ApiImplementationDefaultTest(basetest.TestCase): ++ ++ if sys.version_info.major <= 2: ++ ++ def testThatPythonIsTheDefault(self): ++ """If -DPYTHON_PROTO_*IMPL* was given at build time, this may fail.""" ++ self.assertEqual('python', api_implementation.Type()) ++ ++ else: ++ ++ def testThatCppApiV2IsTheDefault(self): ++ """If -DPYTHON_PROTO_*IMPL* was given at build time, this may fail.""" ++ self.assertEqual('cpp', api_implementation.Type()) ++ self.assertEqual(2, api_implementation.Version()) ++ ++ ++if __name__ == '__main__': ++ basetest.main() +--- /dev/null ++++ protobuf-2.6.1/python3/google/protobuf/internal/containers.py +@@ -0,0 +1,269 @@ ++# Protocol Buffers - Google's data interchange format ++# Copyright 2008 Google Inc. All rights reserved. ++# https://developers.google.com/protocol-buffers/ ++# ++# Redistribution and use in source and binary forms, with or without ++# modification, are permitted provided that the following conditions are ++# met: ++# ++# * Redistributions of source code must retain the above copyright ++# notice, this list of conditions and the following disclaimer. ++# * Redistributions in binary form must reproduce the above ++# copyright notice, this list of conditions and the following disclaimer ++# in the documentation and/or other materials provided with the ++# distribution. ++# * Neither the name of Google Inc. nor the names of its ++# contributors may be used to endorse or promote products derived from ++# this software without specific prior written permission. 
++# ++# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS ++# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT ++# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR ++# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT ++# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, ++# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT ++# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, ++# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY ++# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT ++# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE ++# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. ++ ++"""Contains container classes to represent different protocol buffer types. ++ ++This file defines container classes which represent categories of protocol ++buffer field types which need extra maintenance. Currently these categories ++are: ++ - Repeated scalar fields - These are all repeated fields which aren't ++ composite (e.g. they are of simple types like int32, string, etc). ++ - Repeated composite fields - Repeated fields which are composite. This ++ includes groups and nested messages. ++""" ++ ++__author__ = 'petar@google.com (Petar Petrov)' ++ ++ ++class BaseContainer(object): ++ ++ """Base container class.""" ++ ++ # Minimizes memory usage and disallows assignment to other attributes. ++ __slots__ = ['_message_listener', '_values'] ++ ++ def __init__(self, message_listener): ++ """ ++ Args: ++ message_listener: A MessageListener implementation. ++ The RepeatedScalarFieldContainer will call this object's ++ Modified() method when it is modified. 
++ """ ++ self._message_listener = message_listener ++ self._values = [] ++ ++ def __getitem__(self, key): ++ """Retrieves item by the specified key.""" ++ return self._values[key] ++ ++ def __len__(self): ++ """Returns the number of elements in the container.""" ++ return len(self._values) ++ ++ def __ne__(self, other): ++ """Checks if another instance isn't equal to this one.""" ++ # The concrete classes should define __eq__. ++ return not self == other ++ ++ def __hash__(self): ++ raise TypeError('unhashable object') ++ ++ def __repr__(self): ++ return repr(self._values) ++ ++ def sort(self, *args, **kwargs): ++ # Continue to support the old sort_function keyword argument. ++ # This is expected to be a rare occurrence, so use LBYL to avoid ++ # the overhead of actually catching KeyError. ++ if 'sort_function' in kwargs: ++ kwargs['cmp'] = kwargs.pop('sort_function') ++ self._values.sort(*args, **kwargs) ++ ++ ++class RepeatedScalarFieldContainer(BaseContainer): ++ ++ """Simple, type-checked, list-like container for holding repeated scalars.""" ++ ++ # Disallows assignment to other attributes. ++ __slots__ = ['_type_checker'] ++ ++ def __init__(self, message_listener, type_checker): ++ """ ++ Args: ++ message_listener: A MessageListener implementation. ++ The RepeatedScalarFieldContainer will call this object's ++ Modified() method when it is modified. ++ type_checker: A type_checkers.ValueChecker instance to run on elements ++ inserted into this container. ++ """ ++ super(RepeatedScalarFieldContainer, self).__init__(message_listener) ++ self._type_checker = type_checker ++ ++ def append(self, value): ++ """Appends an item to the list. Similar to list.append().""" ++ self._values.append(self._type_checker.CheckValue(value)) ++ if not self._message_listener.dirty: ++ self._message_listener.Modified() ++ ++ def insert(self, key, value): ++ """Inserts the item at the specified position. 
Similar to list.insert().""" ++ self._values.insert(key, self._type_checker.CheckValue(value)) ++ if not self._message_listener.dirty: ++ self._message_listener.Modified() ++ ++ def extend(self, elem_seq): ++ """Extends by appending the given sequence. Similar to list.extend().""" ++ if not elem_seq: ++ return ++ ++ new_values = [] ++ for elem in elem_seq: ++ new_values.append(self._type_checker.CheckValue(elem)) ++ self._values.extend(new_values) ++ self._message_listener.Modified() ++ ++ def MergeFrom(self, other): ++ """Appends the contents of another repeated field of the same type to this ++ one. We do not check the types of the individual fields. ++ """ ++ self._values.extend(other._values) ++ self._message_listener.Modified() ++ ++ def remove(self, elem): ++ """Removes an item from the list. Similar to list.remove().""" ++ self._values.remove(elem) ++ self._message_listener.Modified() ++ ++ def __setitem__(self, key, value): ++ """Sets the item on the specified position.""" ++ if isinstance(key, slice): # PY3 ++ if key.step is not None: ++ raise ValueError('Extended slices not supported') ++ self.__setslice__(key.start, key.stop, value) ++ else: ++ self._values[key] = self._type_checker.CheckValue(value) ++ self._message_listener.Modified() ++ ++ def __getslice__(self, start, stop): ++ """Retrieves the subset of items from between the specified indices.""" ++ return self._values[start:stop] ++ ++ def __setslice__(self, start, stop, values): ++ """Sets the subset of items from between the specified indices.""" ++ new_values = [] ++ for value in values: ++ new_values.append(self._type_checker.CheckValue(value)) ++ self._values[start:stop] = new_values ++ self._message_listener.Modified() ++ ++ def __delitem__(self, key): ++ """Deletes the item at the specified position.""" ++ del self._values[key] ++ self._message_listener.Modified() ++ ++ def __delslice__(self, start, stop): ++ """Deletes the subset of items from between the specified indices.""" ++ del 
self._values[start:stop] ++ self._message_listener.Modified() ++ ++ def __eq__(self, other): ++ """Compares the current instance with another one.""" ++ if self is other: ++ return True ++ # Special case for the same type which should be common and fast. ++ if isinstance(other, self.__class__): ++ return other._values == self._values ++ # We are presumably comparing against some other sequence type. ++ return other == self._values ++ ++ ++class RepeatedCompositeFieldContainer(BaseContainer): ++ ++ """Simple, list-like container for holding repeated composite fields.""" ++ ++ # Disallows assignment to other attributes. ++ __slots__ = ['_message_descriptor'] ++ ++ def __init__(self, message_listener, message_descriptor): ++ """ ++ Note that we pass in a descriptor instead of the generated directly, ++ since at the time we construct a _RepeatedCompositeFieldContainer we ++ haven't yet necessarily initialized the type that will be contained in the ++ container. ++ ++ Args: ++ message_listener: A MessageListener implementation. ++ The RepeatedCompositeFieldContainer will call this object's ++ Modified() method when it is modified. ++ message_descriptor: A Descriptor instance describing the protocol type ++ that should be present in this container. We'll use the ++ _concrete_class field of this descriptor when the client calls add(). ++ """ ++ super(RepeatedCompositeFieldContainer, self).__init__(message_listener) ++ self._message_descriptor = message_descriptor ++ ++ def add(self, **kwargs): ++ """Adds a new element at the end of the list and returns it. Keyword ++ arguments may be used to initialize the element. 
++ """ ++ new_element = self._message_descriptor._concrete_class(**kwargs) ++ new_element._SetListener(self._message_listener) ++ self._values.append(new_element) ++ if not self._message_listener.dirty: ++ self._message_listener.Modified() ++ return new_element ++ ++ def extend(self, elem_seq): ++ """Extends by appending the given sequence of elements of the same type ++ as this one, copying each individual message. ++ """ ++ message_class = self._message_descriptor._concrete_class ++ listener = self._message_listener ++ values = self._values ++ for message in elem_seq: ++ new_element = message_class() ++ new_element._SetListener(listener) ++ new_element.MergeFrom(message) ++ values.append(new_element) ++ listener.Modified() ++ ++ def MergeFrom(self, other): ++ """Appends the contents of another repeated field of the same type to this ++ one, copying each individual message. ++ """ ++ self.extend(other._values) ++ ++ def remove(self, elem): ++ """Removes an item from the list. Similar to list.remove().""" ++ self._values.remove(elem) ++ self._message_listener.Modified() ++ ++ def __getslice__(self, start, stop): ++ """Retrieves the subset of items from between the specified indices.""" ++ return self._values[start:stop] ++ ++ def __delitem__(self, key): ++ """Deletes the item at the specified position.""" ++ del self._values[key] ++ self._message_listener.Modified() ++ ++ def __delslice__(self, start, stop): ++ """Deletes the subset of items from between the specified indices.""" ++ del self._values[start:stop] ++ self._message_listener.Modified() ++ ++ def __eq__(self, other): ++ """Compares the current instance with another one.""" ++ if self is other: ++ return True ++ if not isinstance(other, self.__class__): ++ raise TypeError('Can only compare repeated composite fields against ' ++ 'other repeated composite fields.') ++ return self._values == other._values +--- /dev/null ++++ protobuf-2.6.1/python3/google/protobuf/internal/cpp_message.py +@@ -0,0 +1,667 @@ 
++# Protocol Buffers - Google's data interchange format ++# Copyright 2008 Google Inc. All rights reserved. ++# https://developers.google.com/protocol-buffers/ ++# ++# Redistribution and use in source and binary forms, with or without ++# modification, are permitted provided that the following conditions are ++# met: ++# ++# * Redistributions of source code must retain the above copyright ++# notice, this list of conditions and the following disclaimer. ++# * Redistributions in binary form must reproduce the above ++# copyright notice, this list of conditions and the following disclaimer ++# in the documentation and/or other materials provided with the ++# distribution. ++# * Neither the name of Google Inc. nor the names of its ++# contributors may be used to endorse or promote products derived from ++# this software without specific prior written permission. ++# ++# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS ++# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT ++# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR ++# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT ++# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, ++# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT ++# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, ++# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY ++# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT ++# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE ++# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. ++ ++"""Contains helper functions used to create protocol message classes from ++Descriptor objects at runtime backed by the protocol buffer C++ API. 
++""" ++ ++__author__ = 'petar@google.com (Petar Petrov)' ++ ++import collections ++import operator ++ ++import six ++import six.moves.copyreg ++ ++from google.protobuf.internal import _net_proto2___python ++from google.protobuf.internal import enum_type_wrapper ++from google.protobuf import message ++ ++ ++_LABEL_REPEATED = _net_proto2___python.LABEL_REPEATED ++_LABEL_OPTIONAL = _net_proto2___python.LABEL_OPTIONAL ++_CPPTYPE_MESSAGE = _net_proto2___python.CPPTYPE_MESSAGE ++_TYPE_MESSAGE = _net_proto2___python.TYPE_MESSAGE ++ ++ ++def GetDescriptorPool(): ++ """Creates a new DescriptorPool C++ object.""" ++ return _net_proto2___python.NewCDescriptorPool() ++ ++ ++_pool = GetDescriptorPool() ++ ++ ++def GetFieldDescriptor(full_field_name): ++ """Searches for a field descriptor given a full field name.""" ++ return _pool.FindFieldByName(full_field_name) ++ ++ ++def BuildFile(content): ++ """Registers a new proto file in the underlying C++ descriptor pool.""" ++ _net_proto2___python.BuildFile(content) ++ ++ ++def GetExtensionDescriptor(full_extension_name): ++ """Searches for extension descriptor given a full field name.""" ++ return _pool.FindExtensionByName(full_extension_name) ++ ++ ++def NewCMessage(full_message_name): ++ """Creates a new C++ protocol message by its name.""" ++ return _net_proto2___python.NewCMessage(full_message_name) ++ ++ ++def ScalarProperty(cdescriptor): ++ """Returns a scalar property for the given descriptor.""" ++ ++ def Getter(self): ++ return self._cmsg.GetScalar(cdescriptor) ++ ++ def Setter(self, value): ++ self._cmsg.SetScalar(cdescriptor, value) ++ ++ return property(Getter, Setter) ++ ++ ++def CompositeProperty(cdescriptor, message_type): ++ """Returns a Python property the given composite field.""" ++ ++ def Getter(self): ++ sub_message = self._composite_fields.get(cdescriptor.name, None) ++ if sub_message is None: ++ cmessage = self._cmsg.NewSubMessage(cdescriptor) ++ sub_message = message_type._concrete_class(__cmessage=cmessage) 
++ self._composite_fields[cdescriptor.name] = sub_message ++ return sub_message ++ ++ return property(Getter) ++ ++ ++class RepeatedScalarContainer(object): ++ """Container for repeated scalar fields.""" ++ ++ __slots__ = ['_message', '_cfield_descriptor', '_cmsg'] ++ ++ def __init__(self, msg, cfield_descriptor): ++ self._message = msg ++ self._cmsg = msg._cmsg ++ self._cfield_descriptor = cfield_descriptor ++ ++ def append(self, value): ++ self._cmsg.AddRepeatedScalar( ++ self._cfield_descriptor, value) ++ ++ def extend(self, sequence): ++ for element in sequence: ++ self.append(element) ++ ++ def insert(self, key, value): ++ values = self[slice(None, None, None)] ++ values.insert(key, value) ++ self._cmsg.AssignRepeatedScalar(self._cfield_descriptor, values) ++ ++ def remove(self, value): ++ values = self[slice(None, None, None)] ++ values.remove(value) ++ self._cmsg.AssignRepeatedScalar(self._cfield_descriptor, values) ++ ++ def __setitem__(self, key, value): ++ values = self[slice(None, None, None)] ++ values[key] = value ++ self._cmsg.AssignRepeatedScalar(self._cfield_descriptor, values) ++ ++ def __getitem__(self, key): ++ return self._cmsg.GetRepeatedScalar(self._cfield_descriptor, key) ++ ++ def __delitem__(self, key): ++ self._cmsg.DeleteRepeatedField(self._cfield_descriptor, key) ++ ++ def __len__(self): ++ return len(self[slice(None, None, None)]) ++ ++ def __eq__(self, other): ++ if self is other: ++ return True ++ if not isinstance(other, collections.Sequence): ++ raise TypeError( ++ 'Can only compare repeated scalar fields against sequences.') ++ # We are presumably comparing against some other sequence type. ++ return other == self[slice(None, None, None)] ++ ++ def __ne__(self, other): ++ return not self == other ++ ++ def __hash__(self): ++ raise TypeError('unhashable object') ++ ++ def sort(self, *args, **kwargs): ++ # Maintain compatibility with the previous interface. 
++ if 'sort_function' in kwargs: ++ kwargs['cmp'] = kwargs.pop('sort_function') ++ self._cmsg.AssignRepeatedScalar(self._cfield_descriptor, ++ sorted(self, *args, **kwargs)) ++ ++ ++def RepeatedScalarProperty(cdescriptor): ++ """Returns a Python property the given repeated scalar field.""" ++ ++ def Getter(self): ++ container = self._composite_fields.get(cdescriptor.name, None) ++ if container is None: ++ container = RepeatedScalarContainer(self, cdescriptor) ++ self._composite_fields[cdescriptor.name] = container ++ return container ++ ++ def Setter(self, new_value): ++ raise AttributeError('Assignment not allowed to repeated field ' ++ '"%s" in protocol message object.' % cdescriptor.name) ++ ++ doc = 'Magic attribute generated for "%s" proto field.' % cdescriptor.name ++ return property(Getter, Setter, doc=doc) ++ ++ ++class RepeatedCompositeContainer(object): ++ """Container for repeated composite fields.""" ++ ++ __slots__ = ['_message', '_subclass', '_cfield_descriptor', '_cmsg'] ++ ++ def __init__(self, msg, cfield_descriptor, subclass): ++ self._message = msg ++ self._cmsg = msg._cmsg ++ self._subclass = subclass ++ self._cfield_descriptor = cfield_descriptor ++ ++ def add(self, **kwargs): ++ cmessage = self._cmsg.AddMessage(self._cfield_descriptor) ++ return self._subclass(__cmessage=cmessage, __owner=self._message, **kwargs) ++ ++ def extend(self, elem_seq): ++ """Extends by appending the given sequence of elements of the same type ++ as this one, copying each individual message. ++ """ ++ for message in elem_seq: ++ self.add().MergeFrom(message) ++ ++ def remove(self, value): ++ # TODO(protocol-devel): This is inefficient as it needs to generate a ++ # message pointer for each message only to do index(). Move this to a C++ ++ # extension function. 
++ self.__delitem__(self[slice(None, None, None)].index(value)) ++ ++ def MergeFrom(self, other): ++ for message in other[:]: ++ self.add().MergeFrom(message) ++ ++ def __getitem__(self, key): ++ cmessages = self._cmsg.GetRepeatedMessage( ++ self._cfield_descriptor, key) ++ subclass = self._subclass ++ if not isinstance(cmessages, list): ++ return subclass(__cmessage=cmessages, __owner=self._message) ++ ++ return [subclass(__cmessage=m, __owner=self._message) for m in cmessages] ++ ++ def __delitem__(self, key): ++ self._cmsg.DeleteRepeatedField( ++ self._cfield_descriptor, key) ++ ++ def __len__(self): ++ return self._cmsg.FieldLength(self._cfield_descriptor) ++ ++ def __eq__(self, other): ++ """Compares the current instance with another one.""" ++ if self is other: ++ return True ++ if not isinstance(other, self.__class__): ++ raise TypeError('Can only compare repeated composite fields against ' ++ 'other repeated composite fields.') ++ messages = self[slice(None, None, None)] ++ other_messages = other[slice(None, None, None)] ++ return messages == other_messages ++ ++ def __hash__(self): ++ raise TypeError('unhashable object') ++ ++ def sort(self, cmp=None, key=None, reverse=False, **kwargs): ++ # Maintain compatibility with the old interface. ++ if cmp is None and 'sort_function' in kwargs: ++ cmp = kwargs.pop('sort_function') ++ ++ # The cmp function, if provided, is passed the results of the key function, ++ # so we only need to wrap one of them. ++ if key is None: ++ index_key = self.__getitem__ ++ else: ++ index_key = lambda i: key(self[i]) ++ ++ # Sort the list of current indexes by the underlying object. ++ indexes = list(range(len(self))) ++ indexes.sort(cmp=cmp, key=index_key, reverse=reverse) ++ ++ # Apply the transposition. ++ for dest, src in enumerate(indexes): ++ if dest == src: ++ continue ++ self._cmsg.SwapRepeatedFieldElements(self._cfield_descriptor, dest, src) ++ # Don't swap the same value twice. 
++ indexes[src] = src ++ ++ ++def RepeatedCompositeProperty(cdescriptor, message_type): ++ """Returns a Python property for the given repeated composite field.""" ++ ++ def Getter(self): ++ container = self._composite_fields.get(cdescriptor.name, None) ++ if container is None: ++ container = RepeatedCompositeContainer( ++ self, cdescriptor, message_type._concrete_class) ++ self._composite_fields[cdescriptor.name] = container ++ return container ++ ++ def Setter(self, new_value): ++ raise AttributeError('Assignment not allowed to repeated field ' ++ '"%s" in protocol message object.' % cdescriptor.name) ++ ++ doc = 'Magic attribute generated for "%s" proto field.' % cdescriptor.name ++ return property(Getter, Setter, doc=doc) ++ ++ ++class ExtensionDict(object): ++ """Extension dictionary added to each protocol message.""" ++ ++ def __init__(self, msg): ++ self._message = msg ++ self._cmsg = msg._cmsg ++ self._values = {} ++ ++ def __setitem__(self, extension, value): ++ from google.protobuf import descriptor ++ if not isinstance(extension, descriptor.FieldDescriptor): ++ raise KeyError('Bad extension %r.' % (extension,)) ++ cdescriptor = extension._cdescriptor ++ if (cdescriptor.label != _LABEL_OPTIONAL or ++ cdescriptor.cpp_type == _CPPTYPE_MESSAGE): ++ raise TypeError('Extension %r is repeated and/or a composite type.' % ( ++ extension.full_name,)) ++ self._cmsg.SetScalar(cdescriptor, value) ++ self._values[extension] = value ++ ++ def __getitem__(self, extension): ++ from google.protobuf import descriptor ++ if not isinstance(extension, descriptor.FieldDescriptor): ++ raise KeyError('Bad extension %r.' 
% (extension,)) ++ ++ cdescriptor = extension._cdescriptor ++ if (cdescriptor.label != _LABEL_REPEATED and ++ cdescriptor.cpp_type != _CPPTYPE_MESSAGE): ++ return self._cmsg.GetScalar(cdescriptor) ++ ++ ext = self._values.get(extension, None) ++ if ext is not None: ++ return ext ++ ++ ext = self._CreateNewHandle(extension) ++ self._values[extension] = ext ++ return ext ++ ++ def ClearExtension(self, extension): ++ from google.protobuf import descriptor ++ if not isinstance(extension, descriptor.FieldDescriptor): ++ raise KeyError('Bad extension %r.' % (extension,)) ++ self._cmsg.ClearFieldByDescriptor(extension._cdescriptor) ++ if extension in self._values: ++ del self._values[extension] ++ ++ def HasExtension(self, extension): ++ from google.protobuf import descriptor ++ if not isinstance(extension, descriptor.FieldDescriptor): ++ raise KeyError('Bad extension %r.' % (extension,)) ++ return self._cmsg.HasFieldByDescriptor(extension._cdescriptor) ++ ++ def _FindExtensionByName(self, name): ++ """Tries to find a known extension with the specified name. ++ ++ Args: ++ name: Extension full name. ++ ++ Returns: ++ Extension field descriptor. ++ """ ++ return self._message._extensions_by_name.get(name, None) ++ ++ def _CreateNewHandle(self, extension): ++ cdescriptor = extension._cdescriptor ++ if (cdescriptor.label != _LABEL_REPEATED and ++ cdescriptor.cpp_type == _CPPTYPE_MESSAGE): ++ cmessage = self._cmsg.NewSubMessage(cdescriptor) ++ return extension.message_type._concrete_class(__cmessage=cmessage) ++ ++ if cdescriptor.label == _LABEL_REPEATED: ++ if cdescriptor.cpp_type == _CPPTYPE_MESSAGE: ++ return RepeatedCompositeContainer( ++ self._message, cdescriptor, extension.message_type._concrete_class) ++ else: ++ return RepeatedScalarContainer(self._message, cdescriptor) ++ # This shouldn't happen! 
++ assert False ++ return None ++ ++ ++def NewMessage(bases, message_descriptor, dictionary): ++ """Creates a new protocol message *class*.""" ++ _AddClassAttributesForNestedExtensions(message_descriptor, dictionary) ++ _AddEnumValues(message_descriptor, dictionary) ++ _AddDescriptors(message_descriptor, dictionary) ++ return bases ++ ++ ++def InitMessage(message_descriptor, cls): ++ """Constructs a new message instance (called before instance's __init__).""" ++ cls._extensions_by_name = {} ++ _AddInitMethod(message_descriptor, cls) ++ _AddMessageMethods(message_descriptor, cls) ++ _AddPropertiesForExtensions(message_descriptor, cls) ++ six.moves.copyreg.pickle(cls, lambda obj: (cls, (), obj.__getstate__())) ++ ++ ++def _AddDescriptors(message_descriptor, dictionary): ++ """Sets up a new protocol message class dictionary. ++ ++ Args: ++ message_descriptor: A Descriptor instance describing this message type. ++ dictionary: Class dictionary to which we'll add a '__slots__' entry. ++ """ ++ dictionary['__descriptors'] = {} ++ for field in message_descriptor.fields: ++ dictionary['__descriptors'][field.name] = GetFieldDescriptor( ++ field.full_name) ++ ++ dictionary['__slots__'] = list(dictionary['__descriptors'].keys()) + [ ++ '_cmsg', '_owner', '_composite_fields', 'Extensions', '_HACK_REFCOUNTS'] ++ ++ ++def _AddEnumValues(message_descriptor, dictionary): ++ """Sets class-level attributes for all enum fields defined in this message. ++ ++ Args: ++ message_descriptor: Descriptor object for this message type. ++ dictionary: Class dictionary that should be populated. 
++ """ ++ for enum_type in message_descriptor.enum_types: ++ dictionary[enum_type.name] = enum_type_wrapper.EnumTypeWrapper(enum_type) ++ for enum_value in enum_type.values: ++ dictionary[enum_value.name] = enum_value.number ++ ++ ++def _AddClassAttributesForNestedExtensions(message_descriptor, dictionary): ++ """Adds class attributes for the nested extensions.""" ++ extension_dict = message_descriptor.extensions_by_name ++ for extension_name, extension_field in extension_dict.items(): ++ assert extension_name not in dictionary ++ dictionary[extension_name] = extension_field ++ ++ ++def _AddInitMethod(message_descriptor, cls): ++ """Adds an __init__ method to cls.""" ++ ++ # Create and attach message field properties to the message class. ++ # This can be done just once per message class, since property setters and ++ # getters are passed the message instance. ++ # This makes message instantiation extremely fast, and at the same time it ++ # doesn't require the creation of property objects for each message instance, ++ # which saves a lot of memory. ++ for field in message_descriptor.fields: ++ field_cdescriptor = cls.__descriptors[field.name] ++ if field.label == _LABEL_REPEATED: ++ if field.cpp_type == _CPPTYPE_MESSAGE: ++ value = RepeatedCompositeProperty(field_cdescriptor, field.message_type) ++ else: ++ value = RepeatedScalarProperty(field_cdescriptor) ++ elif field.cpp_type == _CPPTYPE_MESSAGE: ++ value = CompositeProperty(field_cdescriptor, field.message_type) ++ else: ++ value = ScalarProperty(field_cdescriptor) ++ setattr(cls, field.name, value) ++ ++ # Attach a constant with the field number. 
++ constant_name = field.name.upper() + '_FIELD_NUMBER' ++ setattr(cls, constant_name, field.number) ++ ++ def Init(self, **kwargs): ++ """Message constructor.""" ++ cmessage = kwargs.pop('__cmessage', None) ++ if cmessage: ++ self._cmsg = cmessage ++ else: ++ self._cmsg = NewCMessage(message_descriptor.full_name) ++ ++ # Keep a reference to the owner, as the owner keeps a reference to the ++ # underlying protocol buffer message. ++ owner = kwargs.pop('__owner', None) ++ if owner: ++ self._owner = owner ++ ++ if message_descriptor.is_extendable: ++ self.Extensions = ExtensionDict(self) ++ else: ++ # Reference counting in the C++ code is broken and depends on ++ # the Extensions reference to keep this object alive during unit ++ # tests (see b/4856052). Remove this once b/4945904 is fixed. ++ self._HACK_REFCOUNTS = self ++ self._composite_fields = {} ++ ++ for field_name, field_value in kwargs.items(): ++ field_cdescriptor = self.__descriptors.get(field_name, None) ++ if not field_cdescriptor: ++ raise ValueError('Protocol message has no "%s" field.' 
% field_name) ++ if field_cdescriptor.label == _LABEL_REPEATED: ++ if field_cdescriptor.cpp_type == _CPPTYPE_MESSAGE: ++ field_name = getattr(self, field_name) ++ for val in field_value: ++ field_name.add().MergeFrom(val) ++ else: ++ getattr(self, field_name).extend(field_value) ++ elif field_cdescriptor.cpp_type == _CPPTYPE_MESSAGE: ++ getattr(self, field_name).MergeFrom(field_value) ++ else: ++ setattr(self, field_name, field_value) ++ ++ Init.__module__ = None ++ Init.__doc__ = None ++ cls.__init__ = Init ++ ++ ++def _IsMessageSetExtension(field): ++ """Checks if a field is a message set extension.""" ++ return (field.is_extension and ++ field.containing_type.has_options and ++ field.containing_type.GetOptions().message_set_wire_format and ++ field.type == _TYPE_MESSAGE and ++ field.message_type == field.extension_scope and ++ field.label == _LABEL_OPTIONAL) ++ ++ ++def _AddMessageMethods(message_descriptor, cls): ++ """Adds the methods to a protocol message class.""" ++ if message_descriptor.is_extendable: ++ ++ def ClearExtension(self, extension): ++ self.Extensions.ClearExtension(extension) ++ ++ def HasExtension(self, extension): ++ return self.Extensions.HasExtension(extension) ++ ++ def HasField(self, field_name): ++ return self._cmsg.HasField(field_name) ++ ++ def ClearField(self, field_name): ++ child_cmessage = None ++ if field_name in self._composite_fields: ++ child_field = self._composite_fields[field_name] ++ del self._composite_fields[field_name] ++ ++ child_cdescriptor = self.__descriptors[field_name] ++ # TODO(anuraag): Support clearing repeated message fields as well. 
++ if (child_cdescriptor.label != _LABEL_REPEATED and ++ child_cdescriptor.cpp_type == _CPPTYPE_MESSAGE): ++ child_field._owner = None ++ child_cmessage = child_field._cmsg ++ ++ if child_cmessage is not None: ++ self._cmsg.ClearField(field_name, child_cmessage) ++ else: ++ self._cmsg.ClearField(field_name) ++ ++ def Clear(self): ++ cmessages_to_release = [] ++ for field_name, child_field in self._composite_fields.items(): ++ child_cdescriptor = self.__descriptors[field_name] ++ # TODO(anuraag): Support clearing repeated message fields as well. ++ if (child_cdescriptor.label != _LABEL_REPEATED and ++ child_cdescriptor.cpp_type == _CPPTYPE_MESSAGE): ++ child_field._owner = None ++ cmessages_to_release.append((child_cdescriptor, child_field._cmsg)) ++ self._composite_fields.clear() ++ self._cmsg.Clear(cmessages_to_release) ++ ++ def IsInitialized(self, errors=None): ++ if self._cmsg.IsInitialized(): ++ return True ++ if errors is not None: ++ errors.extend(self.FindInitializationErrors()); ++ return False ++ ++ def SerializeToString(self): ++ if not self.IsInitialized(): ++ raise message.EncodeError( ++ 'Message %s is missing required fields: %s' % ( ++ self._cmsg.full_name, ','.join(self.FindInitializationErrors()))) ++ return self._cmsg.SerializeToString() ++ ++ def SerializePartialToString(self): ++ return self._cmsg.SerializePartialToString() ++ ++ def ParseFromString(self, serialized): ++ self.Clear() ++ self.MergeFromString(serialized) ++ ++ def MergeFromString(self, serialized): ++ byte_size = self._cmsg.MergeFromString(serialized) ++ if byte_size < 0: ++ raise message.DecodeError('Unable to merge from string.') ++ return byte_size ++ ++ def MergeFrom(self, msg): ++ if not isinstance(msg, cls): ++ raise TypeError( ++ "Parameter to MergeFrom() must be instance of same class: " ++ "expected %s got %s." 
% (cls.__name__, type(msg).__name__)) ++ self._cmsg.MergeFrom(msg._cmsg) ++ ++ def CopyFrom(self, msg): ++ self._cmsg.CopyFrom(msg._cmsg) ++ ++ def ByteSize(self): ++ return self._cmsg.ByteSize() ++ ++ def SetInParent(self): ++ return self._cmsg.SetInParent() ++ ++ def ListFields(self): ++ all_fields = [] ++ field_list = self._cmsg.ListFields() ++ fields_by_name = cls.DESCRIPTOR.fields_by_name ++ for is_extension, field_name in field_list: ++ if is_extension: ++ extension = cls._extensions_by_name[field_name] ++ all_fields.append((extension, self.Extensions[extension])) ++ else: ++ field_descriptor = fields_by_name[field_name] ++ all_fields.append( ++ (field_descriptor, getattr(self, field_name))) ++ all_fields.sort(key=lambda item: item[0].number) ++ return all_fields ++ ++ def FindInitializationErrors(self): ++ return self._cmsg.FindInitializationErrors() ++ ++ def __str__(self): ++ return str(self._cmsg) ++ ++ def __eq__(self, other): ++ if self is other: ++ return True ++ if not isinstance(other, self.__class__): ++ return False ++ return self.ListFields() == other.ListFields() ++ ++ def __ne__(self, other): ++ return not self == other ++ ++ def __hash__(self): ++ raise TypeError('unhashable object') ++ ++ def __unicode__(self): ++ # Lazy import to prevent circular import when text_format imports this file. ++ from google.protobuf import text_format ++ return text_format.MessageToString(self, as_utf8=True).decode('utf-8') ++ ++ # Attach the local methods to the message class. ++ for key, value in locals().copy().items(): ++ if key not in ('key', 'value', '__builtins__', '__name__', '__doc__'): ++ setattr(cls, key, value) ++ ++ # Static methods: ++ ++ def RegisterExtension(extension_handle): ++ extension_handle.containing_type = cls.DESCRIPTOR ++ cls._extensions_by_name[extension_handle.full_name] = extension_handle ++ ++ if _IsMessageSetExtension(extension_handle): ++ # MessageSet extension. Also register under type name. 
++ cls._extensions_by_name[ ++ extension_handle.message_type.full_name] = extension_handle ++ cls.RegisterExtension = staticmethod(RegisterExtension) ++ ++ def FromString(string): ++ msg = cls() ++ msg.MergeFromString(string) ++ return msg ++ cls.FromString = staticmethod(FromString) ++ ++ ++ ++def _AddPropertiesForExtensions(message_descriptor, cls): ++ """Adds properties for all fields in this protocol message type.""" ++ extension_dict = message_descriptor.extensions_by_name ++ for extension_name, extension_field in extension_dict.items(): ++ constant_name = extension_name.upper() + '_FIELD_NUMBER' ++ setattr(cls, constant_name, extension_field.number) +--- /dev/null ++++ protobuf-2.6.1/python3/google/protobuf/internal/decoder.py +@@ -0,0 +1,815 @@ ++# Protocol Buffers - Google's data interchange format ++# Copyright 2008 Google Inc. All rights reserved. ++# https://developers.google.com/protocol-buffers/ ++# ++# Redistribution and use in source and binary forms, with or without ++# modification, are permitted provided that the following conditions are ++# met: ++# ++# * Redistributions of source code must retain the above copyright ++# notice, this list of conditions and the following disclaimer. ++# * Redistributions in binary form must reproduce the above ++# copyright notice, this list of conditions and the following disclaimer ++# in the documentation and/or other materials provided with the ++# distribution. ++# * Neither the name of Google Inc. nor the names of its ++# contributors may be used to endorse or promote products derived from ++# this software without specific prior written permission. ++# ++# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS ++# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT ++# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR ++# A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT ++# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, ++# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT ++# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, ++# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY ++# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT ++# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE ++# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. ++ ++# Copyright 2009 Google Inc. All Rights Reserved. ++ ++"""Code for decoding protocol buffer primitives. ++ ++This code is very similar to encoder.py -- read the docs for that module first. ++ ++A "decoder" is a function with the signature: ++ Decode(buffer, pos, end, message, field_dict) ++The arguments are: ++ buffer: The string containing the encoded message. ++ pos: The current position in the string. ++ end: The position in the string where the current message ends. May be ++ less than len(buffer) if we're reading a sub-message. ++ message: The message object into which we're parsing. ++ field_dict: message._fields (avoids a hashtable lookup). ++The decoder reads the field and stores it into field_dict, returning the new ++buffer position. A decoder for a repeated field may proactively decode all of ++the elements of that field, if they appear consecutively. ++ ++Note that decoders may throw any of the following: ++ IndexError: Indicates a truncated message. ++ struct.error: Unpacking of a fixed-width field failed. ++ message.DecodeError: Other errors. ++ ++Decoders are expected to raise an exception if they are called with pos > end. ++This allows callers to be lax about bounds checking: it's fine to read past ++"end" as long as you are sure that someone else will notice and throw an ++exception later on. 
++ ++Something up the call stack is expected to catch IndexError and struct.error ++and convert them to message.DecodeError. ++ ++Decoders are constructed using decoder constructors with the signature: ++ MakeDecoder(field_number, is_repeated, is_packed, key, new_default) ++The arguments are: ++ field_number: The field number of the field we want to decode. ++ is_repeated: Is the field a repeated field? (bool) ++ is_packed: Is the field a packed field? (bool) ++ key: The key to use when looking up the field within field_dict. ++ (This is actually the FieldDescriptor but nothing in this ++ file should depend on that.) ++ new_default: A function which takes a message object as a parameter and ++ returns a new instance of the default value for this field. ++ (This is called for repeated fields and sub-messages, when an ++ instance does not already exist.) ++ ++As with encoders, we define a decoder constructor for every type of field. ++Then, for every field of every message class we construct an actual decoder. ++That decoder goes into a dict indexed by tag, so when we decode a message ++we repeatedly read a tag, look up the corresponding decoder, and invoke it. ++""" ++ ++__author__ = 'kenton@google.com (Kenton Varda)' ++ ++import struct ++ ++import six ++ ++if six.PY3: ++ long = int ++ ++from google.protobuf.internal import encoder ++from google.protobuf.internal import wire_format ++from google.protobuf import message ++ ++ ++# This will overflow and thus become IEEE-754 "infinity". We would use ++# "float('inf')" but it doesn't work on Windows pre-Python-2.6. ++_POS_INF = 1e10000 ++_NEG_INF = -_POS_INF ++_NAN = _POS_INF * 0 ++ ++ ++# This is not for optimization, but rather to avoid conflicts with local ++# variables named "message". ++_DecodeError = message.DecodeError ++ ++ ++def _VarintDecoder(mask, result_type): ++ """Return an encoder for a basic varint value (does not include tag). 
++ ++ Decoded values will be bitwise-anded with the given mask before being ++ returned, e.g. to limit them to 32 bits. The returned decoder does not ++ take the usual "end" parameter -- the caller is expected to do bounds checking ++ after the fact (often the caller can defer such checking until later). The ++ decoder returns a (value, new_pos) pair. ++ """ ++ ++ def DecodeVarint(buffer, pos): ++ result = 0 ++ shift = 0 ++ while 1: ++ b = six.indexbytes(buffer, pos) ++ result |= ((b & 0x7f) << shift) ++ pos += 1 ++ if not (b & 0x80): ++ result &= mask ++ result = result_type(result) ++ return (result, pos) ++ shift += 7 ++ if shift >= 64: ++ raise _DecodeError('Too many bytes when decoding varint.') ++ return DecodeVarint ++ ++ ++def _SignedVarintDecoder(mask, result_type): ++ """Like _VarintDecoder() but decodes signed values.""" ++ ++ def DecodeVarint(buffer, pos): ++ result = 0 ++ shift = 0 ++ while 1: ++ b = six.indexbytes(buffer, pos) ++ result |= ((b & 0x7f) << shift) ++ pos += 1 ++ if not (b & 0x80): ++ if result > 0x7fffffffffffffff: ++ result -= (1 << 64) ++ result |= ~mask ++ else: ++ result &= mask ++ result = result_type(result) ++ return (result, pos) ++ shift += 7 ++ if shift >= 64: ++ raise _DecodeError('Too many bytes when decoding varint.') ++ return DecodeVarint ++ ++# We force 32-bit values to int and 64-bit values to long to make ++# alternate implementations where the distinction is more significant ++# (e.g. the C++ implementation) simpler. ++ ++_DecodeVarint = _VarintDecoder((1 << 64) - 1, long) ++_DecodeSignedVarint = _SignedVarintDecoder((1 << 64) - 1, long) ++ ++# Use these versions for values which must be limited to 32 bits. ++_DecodeVarint32 = _VarintDecoder((1 << 32) - 1, int) ++_DecodeSignedVarint32 = _SignedVarintDecoder((1 << 32) - 1, int) ++ ++ ++def ReadTag(buffer, pos): ++ """Read a tag from the buffer, and return a (tag_bytes, new_pos) tuple. ++ ++ We return the raw bytes of the tag rather than decoding them. 
The raw ++ bytes can then be used to look up the proper decoder. This effectively allows ++ us to trade some work that would be done in pure-python (decoding a varint) ++ for work that is done in C (searching for a byte string in a hash table). ++ In a low-level language it would be much cheaper to decode the varint and ++ use that, but not in Python. ++ """ ++ ++ start = pos ++ while six.indexbytes(buffer, pos) & 0x80: ++ pos += 1 ++ pos += 1 ++ return (buffer[start:pos], pos) ++ ++ ++# -------------------------------------------------------------------- ++ ++ ++def _SimpleDecoder(wire_type, decode_value): ++ """Return a constructor for a decoder for fields of a particular type. ++ ++ Args: ++ wire_type: The field's wire type. ++ decode_value: A function which decodes an individual value, e.g. ++ _DecodeVarint() ++ """ ++ ++ def SpecificDecoder(field_number, is_repeated, is_packed, key, new_default): ++ if is_packed: ++ local_DecodeVarint = _DecodeVarint ++ def DecodePackedField(buffer, pos, end, message, field_dict): ++ value = field_dict.get(key) ++ if value is None: ++ value = field_dict.setdefault(key, new_default(message)) ++ (endpoint, pos) = local_DecodeVarint(buffer, pos) ++ endpoint += pos ++ if endpoint > end: ++ raise _DecodeError('Truncated message.') ++ while pos < endpoint: ++ (element, pos) = decode_value(buffer, pos) ++ value.append(element) ++ if pos > endpoint: ++ del value[-1] # Discard corrupt value. ++ raise _DecodeError('Packed element was truncated.') ++ return pos ++ return DecodePackedField ++ elif is_repeated: ++ tag_bytes = encoder.TagBytes(field_number, wire_type) ++ tag_len = len(tag_bytes) ++ def DecodeRepeatedField(buffer, pos, end, message, field_dict): ++ value = field_dict.get(key) ++ if value is None: ++ value = field_dict.setdefault(key, new_default(message)) ++ while 1: ++ (element, new_pos) = decode_value(buffer, pos) ++ value.append(element) ++ # Predict that the next tag is another copy of the same repeated ++ # field. 
++ pos = new_pos + tag_len ++ if buffer[new_pos:pos] != tag_bytes or new_pos >= end: ++ # Prediction failed. Return. ++ if new_pos > end: ++ raise _DecodeError('Truncated message.') ++ return new_pos ++ return DecodeRepeatedField ++ else: ++ def DecodeField(buffer, pos, end, message, field_dict): ++ (field_dict[key], pos) = decode_value(buffer, pos) ++ if pos > end: ++ del field_dict[key] # Discard corrupt value. ++ raise _DecodeError('Truncated message.') ++ return pos ++ return DecodeField ++ ++ return SpecificDecoder ++ ++ ++def _ModifiedDecoder(wire_type, decode_value, modify_value): ++ """Like SimpleDecoder but additionally invokes modify_value on every value ++ before storing it. Usually modify_value is ZigZagDecode. ++ """ ++ ++ # Reusing _SimpleDecoder is slightly slower than copying a bunch of code, but ++ # not enough to make a significant difference. ++ ++ def InnerDecode(buffer, pos): ++ (result, new_pos) = decode_value(buffer, pos) ++ return (modify_value(result), new_pos) ++ return _SimpleDecoder(wire_type, InnerDecode) ++ ++ ++def _StructPackDecoder(wire_type, format): ++ """Return a constructor for a decoder for a fixed-width field. ++ ++ Args: ++ wire_type: The field's wire type. ++ format: The format string to pass to struct.unpack(). ++ """ ++ ++ value_size = struct.calcsize(format) ++ local_unpack = struct.unpack ++ ++ # Reusing _SimpleDecoder is slightly slower than copying a bunch of code, but ++ # not enough to make a significant difference. ++ ++ # Note that we expect someone up-stack to catch struct.error and convert ++ # it to _DecodeError -- this way we don't have to set up exception- ++ # handling blocks every time we parse one value. ++ ++ def InnerDecode(buffer, pos): ++ new_pos = pos + value_size ++ result = local_unpack(format, buffer[pos:new_pos])[0] ++ return (result, new_pos) ++ return _SimpleDecoder(wire_type, InnerDecode) ++ ++ ++def _FloatDecoder(): ++ """Returns a decoder for a float field. 
++ ++ This code works around a bug in struct.unpack for non-finite 32-bit ++ floating-point values. ++ """ ++ ++ local_unpack = struct.unpack ++ ++ def InnerDecode(buffer, pos): ++ # We expect a 32-bit value in little-endian byte order. Bit 1 is the sign ++ # bit, bits 2-9 represent the exponent, and bits 10-32 are the significand. ++ new_pos = pos + 4 ++ float_bytes = buffer[pos:new_pos] ++ ++ # If this value has all its exponent bits set, then it's non-finite. ++ # In Python 2.4, struct.unpack will convert it to a finite 64-bit value. ++ # To avoid that, we parse it specially. ++ if (float_bytes[3:4] in b'\x7F\xFF' and float_bytes[2:3] >= b'\x80'): ++ # If at least one significand bit is set... ++ if float_bytes[0:3] != b'\x00\x00\x80': ++ return (_NAN, new_pos) ++ # If sign bit is set... ++ if float_bytes[3:4] == b'\xFF': ++ return (_NEG_INF, new_pos) ++ return (_POS_INF, new_pos) ++ ++ # Note that we expect someone up-stack to catch struct.error and convert ++ # it to _DecodeError -- this way we don't have to set up exception- ++ # handling blocks every time we parse one value. ++ result = local_unpack('= b'\xF0') ++ and (double_bytes[0:7] != b'\x00\x00\x00\x00\x00\x00\xF0')): ++ return (_NAN, new_pos) ++ ++ # Note that we expect someone up-stack to catch struct.error and convert ++ # it to _DecodeError -- this way we don't have to set up exception- ++ # handling blocks every time we parse one value. 
++ result = local_unpack(' end: ++ raise _DecodeError('Truncated message.') ++ while pos < endpoint: ++ value_start_pos = pos ++ (element, pos) = _DecodeSignedVarint32(buffer, pos) ++ if element in enum_type.values_by_number: ++ value.append(element) ++ else: ++ if not message._unknown_fields: ++ message._unknown_fields = [] ++ tag_bytes = encoder.TagBytes(field_number, ++ wire_format.WIRETYPE_VARINT) ++ message._unknown_fields.append( ++ (tag_bytes, buffer[value_start_pos:pos])) ++ if pos > endpoint: ++ if element in enum_type.values_by_number: ++ del value[-1] # Discard corrupt value. ++ else: ++ del message._unknown_fields[-1] ++ raise _DecodeError('Packed element was truncated.') ++ return pos ++ return DecodePackedField ++ elif is_repeated: ++ tag_bytes = encoder.TagBytes(field_number, wire_format.WIRETYPE_VARINT) ++ tag_len = len(tag_bytes) ++ def DecodeRepeatedField(buffer, pos, end, message, field_dict): ++ value = field_dict.get(key) ++ if value is None: ++ value = field_dict.setdefault(key, new_default(message)) ++ while 1: ++ (element, new_pos) = _DecodeSignedVarint32(buffer, pos) ++ if element in enum_type.values_by_number: ++ value.append(element) ++ else: ++ if not message._unknown_fields: ++ message._unknown_fields = [] ++ message._unknown_fields.append( ++ (tag_bytes, buffer[pos:new_pos])) ++ # Predict that the next tag is another copy of the same repeated ++ # field. ++ pos = new_pos + tag_len ++ if buffer[new_pos:pos] != tag_bytes or new_pos >= end: ++ # Prediction failed. Return. 
++ if new_pos > end: ++ raise _DecodeError('Truncated message.') ++ return new_pos ++ return DecodeRepeatedField ++ else: ++ def DecodeField(buffer, pos, end, message, field_dict): ++ value_start_pos = pos ++ (enum_value, pos) = _DecodeSignedVarint32(buffer, pos) ++ if pos > end: ++ raise _DecodeError('Truncated message.') ++ if enum_value in enum_type.values_by_number: ++ field_dict[key] = enum_value ++ else: ++ if not message._unknown_fields: ++ message._unknown_fields = [] ++ tag_bytes = encoder.TagBytes(field_number, ++ wire_format.WIRETYPE_VARINT) ++ message._unknown_fields.append( ++ (tag_bytes, buffer[value_start_pos:pos])) ++ return pos ++ return DecodeField ++ ++ ++# -------------------------------------------------------------------- ++ ++ ++Int32Decoder = _SimpleDecoder( ++ wire_format.WIRETYPE_VARINT, _DecodeSignedVarint32) ++ ++Int64Decoder = _SimpleDecoder( ++ wire_format.WIRETYPE_VARINT, _DecodeSignedVarint) ++ ++UInt32Decoder = _SimpleDecoder(wire_format.WIRETYPE_VARINT, _DecodeVarint32) ++UInt64Decoder = _SimpleDecoder(wire_format.WIRETYPE_VARINT, _DecodeVarint) ++ ++SInt32Decoder = _ModifiedDecoder( ++ wire_format.WIRETYPE_VARINT, _DecodeVarint32, wire_format.ZigZagDecode) ++SInt64Decoder = _ModifiedDecoder( ++ wire_format.WIRETYPE_VARINT, _DecodeVarint, wire_format.ZigZagDecode) ++ ++# Note that Python conveniently guarantees that when using the '<' prefix on ++# formats, they will also have the same size across all platforms (as opposed ++# to without the prefix, where their sizes depend on the C compiler's basic ++# type sizes). ++Fixed32Decoder = _StructPackDecoder(wire_format.WIRETYPE_FIXED32, ' end: ++ raise _DecodeError('Truncated string.') ++ value.append(_ConvertToUnicode(buffer[pos:new_pos])) ++ # Predict that the next tag is another copy of the same repeated field. ++ pos = new_pos + tag_len ++ if buffer[new_pos:pos] != tag_bytes or new_pos == end: ++ # Prediction failed. Return. 
++ return new_pos ++ return DecodeRepeatedField ++ else: ++ def DecodeField(buffer, pos, end, message, field_dict): ++ (size, pos) = local_DecodeVarint(buffer, pos) ++ new_pos = pos + size ++ if new_pos > end: ++ raise _DecodeError('Truncated string.') ++ field_dict[key] = _ConvertToUnicode(buffer[pos:new_pos]) ++ return new_pos ++ return DecodeField ++ ++ ++def BytesDecoder(field_number, is_repeated, is_packed, key, new_default): ++ """Returns a decoder for a bytes field.""" ++ ++ local_DecodeVarint = _DecodeVarint ++ ++ assert not is_packed ++ if is_repeated: ++ tag_bytes = encoder.TagBytes(field_number, ++ wire_format.WIRETYPE_LENGTH_DELIMITED) ++ tag_len = len(tag_bytes) ++ def DecodeRepeatedField(buffer, pos, end, message, field_dict): ++ value = field_dict.get(key) ++ if value is None: ++ value = field_dict.setdefault(key, new_default(message)) ++ while 1: ++ (size, pos) = local_DecodeVarint(buffer, pos) ++ new_pos = pos + size ++ if new_pos > end: ++ raise _DecodeError('Truncated string.') ++ value.append(buffer[pos:new_pos]) ++ # Predict that the next tag is another copy of the same repeated field. ++ pos = new_pos + tag_len ++ if buffer[new_pos:pos] != tag_bytes or new_pos == end: ++ # Prediction failed. Return. 
++ return new_pos ++ return DecodeRepeatedField ++ else: ++ def DecodeField(buffer, pos, end, message, field_dict): ++ (size, pos) = local_DecodeVarint(buffer, pos) ++ new_pos = pos + size ++ if new_pos > end: ++ raise _DecodeError('Truncated string.') ++ field_dict[key] = buffer[pos:new_pos] ++ return new_pos ++ return DecodeField ++ ++ ++def GroupDecoder(field_number, is_repeated, is_packed, key, new_default): ++ """Returns a decoder for a group field.""" ++ ++ end_tag_bytes = encoder.TagBytes(field_number, ++ wire_format.WIRETYPE_END_GROUP) ++ end_tag_len = len(end_tag_bytes) ++ ++ assert not is_packed ++ if is_repeated: ++ tag_bytes = encoder.TagBytes(field_number, ++ wire_format.WIRETYPE_START_GROUP) ++ tag_len = len(tag_bytes) ++ def DecodeRepeatedField(buffer, pos, end, message, field_dict): ++ value = field_dict.get(key) ++ if value is None: ++ value = field_dict.setdefault(key, new_default(message)) ++ while 1: ++ value = field_dict.get(key) ++ if value is None: ++ value = field_dict.setdefault(key, new_default(message)) ++ # Read sub-message. ++ pos = value.add()._InternalParse(buffer, pos, end) ++ # Read end tag. ++ new_pos = pos+end_tag_len ++ if buffer[pos:new_pos] != end_tag_bytes or new_pos > end: ++ raise _DecodeError('Missing group end tag.') ++ # Predict that the next tag is another copy of the same repeated field. ++ pos = new_pos + tag_len ++ if buffer[new_pos:pos] != tag_bytes or new_pos == end: ++ # Prediction failed. Return. ++ return new_pos ++ return DecodeRepeatedField ++ else: ++ def DecodeField(buffer, pos, end, message, field_dict): ++ value = field_dict.get(key) ++ if value is None: ++ value = field_dict.setdefault(key, new_default(message)) ++ # Read sub-message. ++ pos = value._InternalParse(buffer, pos, end) ++ # Read end tag. 
++ new_pos = pos+end_tag_len ++ if buffer[pos:new_pos] != end_tag_bytes or new_pos > end: ++ raise _DecodeError('Missing group end tag.') ++ return new_pos ++ return DecodeField ++ ++ ++def MessageDecoder(field_number, is_repeated, is_packed, key, new_default): ++ """Returns a decoder for a message field.""" ++ ++ local_DecodeVarint = _DecodeVarint ++ ++ assert not is_packed ++ if is_repeated: ++ tag_bytes = encoder.TagBytes(field_number, ++ wire_format.WIRETYPE_LENGTH_DELIMITED) ++ tag_len = len(tag_bytes) ++ def DecodeRepeatedField(buffer, pos, end, message, field_dict): ++ value = field_dict.get(key) ++ if value is None: ++ value = field_dict.setdefault(key, new_default(message)) ++ while 1: ++ value = field_dict.get(key) ++ if value is None: ++ value = field_dict.setdefault(key, new_default(message)) ++ # Read length. ++ (size, pos) = local_DecodeVarint(buffer, pos) ++ new_pos = pos + size ++ if new_pos > end: ++ raise _DecodeError('Truncated message.') ++ # Read sub-message. ++ if value.add()._InternalParse(buffer, pos, new_pos) != new_pos: ++ # The only reason _InternalParse would return early is if it ++ # encountered an end-group tag. ++ raise _DecodeError('Unexpected end-group tag.') ++ # Predict that the next tag is another copy of the same repeated field. ++ pos = new_pos + tag_len ++ if buffer[new_pos:pos] != tag_bytes or new_pos == end: ++ # Prediction failed. Return. ++ return new_pos ++ return DecodeRepeatedField ++ else: ++ def DecodeField(buffer, pos, end, message, field_dict): ++ value = field_dict.get(key) ++ if value is None: ++ value = field_dict.setdefault(key, new_default(message)) ++ # Read length. ++ (size, pos) = local_DecodeVarint(buffer, pos) ++ new_pos = pos + size ++ if new_pos > end: ++ raise _DecodeError('Truncated message.') ++ # Read sub-message. ++ if value._InternalParse(buffer, pos, new_pos) != new_pos: ++ # The only reason _InternalParse would return early is if it encountered ++ # an end-group tag. 
++ raise _DecodeError('Unexpected end-group tag.') ++ return new_pos ++ return DecodeField ++ ++ ++# -------------------------------------------------------------------- ++ ++MESSAGE_SET_ITEM_TAG = encoder.TagBytes(1, wire_format.WIRETYPE_START_GROUP) ++ ++def MessageSetItemDecoder(extensions_by_number): ++ """Returns a decoder for a MessageSet item. ++ ++ The parameter is the _extensions_by_number map for the message class. ++ ++ The message set message looks like this: ++ message MessageSet { ++ repeated group Item = 1 { ++ required int32 type_id = 2; ++ required string message = 3; ++ } ++ } ++ """ ++ ++ type_id_tag_bytes = encoder.TagBytes(2, wire_format.WIRETYPE_VARINT) ++ message_tag_bytes = encoder.TagBytes(3, wire_format.WIRETYPE_LENGTH_DELIMITED) ++ item_end_tag_bytes = encoder.TagBytes(1, wire_format.WIRETYPE_END_GROUP) ++ ++ local_ReadTag = ReadTag ++ local_DecodeVarint = _DecodeVarint ++ local_SkipField = SkipField ++ ++ def DecodeItem(buffer, pos, end, message, field_dict): ++ message_set_item_start = pos ++ type_id = -1 ++ message_start = -1 ++ message_end = -1 ++ ++ # Technically, type_id and message can appear in any order, so we need ++ # a little loop here. 
++ while 1: ++ (tag_bytes, pos) = local_ReadTag(buffer, pos) ++ if tag_bytes == type_id_tag_bytes: ++ (type_id, pos) = local_DecodeVarint(buffer, pos) ++ elif tag_bytes == message_tag_bytes: ++ (size, message_start) = local_DecodeVarint(buffer, pos) ++ pos = message_end = message_start + size ++ elif tag_bytes == item_end_tag_bytes: ++ break ++ else: ++ pos = SkipField(buffer, pos, end, tag_bytes) ++ if pos == -1: ++ raise _DecodeError('Missing group end tag.') ++ ++ if pos > end: ++ raise _DecodeError('Truncated message.') ++ ++ if type_id == -1: ++ raise _DecodeError('MessageSet item missing type_id.') ++ if message_start == -1: ++ raise _DecodeError('MessageSet item missing message.') ++ ++ extension = extensions_by_number.get(type_id) ++ if extension is not None: ++ value = field_dict.get(extension) ++ if value is None: ++ value = field_dict.setdefault( ++ extension, extension.message_type._concrete_class()) ++ if value._InternalParse(buffer, message_start,message_end) != message_end: ++ # The only reason _InternalParse would return early is if it encountered ++ # an end-group tag. ++ raise _DecodeError('Unexpected end-group tag.') ++ else: ++ if not message._unknown_fields: ++ message._unknown_fields = [] ++ message._unknown_fields.append((MESSAGE_SET_ITEM_TAG, ++ buffer[message_set_item_start:pos])) ++ ++ return pos ++ ++ return DecodeItem ++ ++# -------------------------------------------------------------------- ++# Optimization is not as heavy here because calls to SkipField() are rare, ++# except for handling end-group tags. ++ ++def _SkipVarint(buffer, pos, end): ++ """Skip a varint value. Returns the new position.""" ++ # Previously ord(buffer[pos]) raised IndexError when pos is out of range. ++ # With this code, ord(b'') raises TypeError. Both are handled in ++ # python_message.py to generate a 'Truncated message' error. 
++ while ord(buffer[pos:pos+1]) & 0x80: ++ pos += 1 ++ pos += 1 ++ if pos > end: ++ raise _DecodeError('Truncated message.') ++ return pos ++ ++def _SkipFixed64(buffer, pos, end): ++ """Skip a fixed64 value. Returns the new position.""" ++ ++ pos += 8 ++ if pos > end: ++ raise _DecodeError('Truncated message.') ++ return pos ++ ++def _SkipLengthDelimited(buffer, pos, end): ++ """Skip a length-delimited value. Returns the new position.""" ++ ++ (size, pos) = _DecodeVarint(buffer, pos) ++ pos += size ++ if pos > end: ++ raise _DecodeError('Truncated message.') ++ return pos ++ ++def _SkipGroup(buffer, pos, end): ++ """Skip sub-group. Returns the new position.""" ++ ++ while 1: ++ (tag_bytes, pos) = ReadTag(buffer, pos) ++ new_pos = SkipField(buffer, pos, end, tag_bytes) ++ if new_pos == -1: ++ return pos ++ pos = new_pos ++ ++def _EndGroup(buffer, pos, end): ++ """Skipping an END_GROUP tag returns -1 to tell the parent loop to break.""" ++ ++ return -1 ++ ++def _SkipFixed32(buffer, pos, end): ++ """Skip a fixed32 value. Returns the new position.""" ++ ++ pos += 4 ++ if pos > end: ++ raise _DecodeError('Truncated message.') ++ return pos ++ ++def _RaiseInvalidWireType(buffer, pos, end): ++ """Skip function for unknown wire types. Raises an exception.""" ++ ++ raise _DecodeError('Tag had invalid wire type.') ++ ++def _FieldSkipper(): ++ """Constructs the SkipField function.""" ++ ++ WIRETYPE_TO_SKIPPER = [ ++ _SkipVarint, ++ _SkipFixed64, ++ _SkipLengthDelimited, ++ _SkipGroup, ++ _EndGroup, ++ _SkipFixed32, ++ _RaiseInvalidWireType, ++ _RaiseInvalidWireType, ++ ] ++ ++ wiretype_mask = wire_format.TAG_TYPE_MASK ++ ++ def SkipField(buffer, pos, end, tag_bytes): ++ """Skips a field with the specified tag. ++ ++ |pos| should point to the byte immediately after the tag. ++ ++ Returns: ++ The new position (after the tag value), or -1 if the tag is an end-group ++ tag (in which case the calling loop should break). 
++ """ ++ ++ # The wire type is always in the first byte since varints are little-endian. ++ wire_type = ord(tag_bytes[0:1]) & wiretype_mask ++ return WIRETYPE_TO_SKIPPER[wire_type](buffer, pos, end) ++ ++ return SkipField ++ ++SkipField = _FieldSkipper() +--- /dev/null ++++ protobuf-2.6.1/python3/google/protobuf/internal/descriptor_database_test.py +@@ -0,0 +1,63 @@ ++#! /usr/bin/python ++# ++# Protocol Buffers - Google's data interchange format ++# Copyright 2008 Google Inc. All rights reserved. ++# https://developers.google.com/protocol-buffers/ ++# ++# Redistribution and use in source and binary forms, with or without ++# modification, are permitted provided that the following conditions are ++# met: ++# ++# * Redistributions of source code must retain the above copyright ++# notice, this list of conditions and the following disclaimer. ++# * Redistributions in binary form must reproduce the above ++# copyright notice, this list of conditions and the following disclaimer ++# in the documentation and/or other materials provided with the ++# distribution. ++# * Neither the name of Google Inc. nor the names of its ++# contributors may be used to endorse or promote products derived from ++# this software without specific prior written permission. ++# ++# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS ++# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT ++# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR ++# A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT ++# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, ++# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT ++# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, ++# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY ++# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT ++# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE ++# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. ++ ++"""Tests for google.protobuf.descriptor_database.""" ++ ++__author__ = 'matthewtoia@google.com (Matt Toia)' ++ ++from google.apputils import basetest ++from google.protobuf import descriptor_pb2 ++from google.protobuf.internal import factory_test2_pb2 ++from google.protobuf import descriptor_database ++ ++ ++class DescriptorDatabaseTest(basetest.TestCase): ++ ++ def testAdd(self): ++ db = descriptor_database.DescriptorDatabase() ++ file_desc_proto = descriptor_pb2.FileDescriptorProto.FromString( ++ factory_test2_pb2.DESCRIPTOR.serialized_pb) ++ db.Add(file_desc_proto) ++ ++ self.assertEquals(file_desc_proto, db.FindFileByName( ++ 'google/protobuf/internal/factory_test2.proto')) ++ self.assertEquals(file_desc_proto, db.FindFileContainingSymbol( ++ 'google.protobuf.python.internal.Factory2Message')) ++ self.assertEquals(file_desc_proto, db.FindFileContainingSymbol( ++ 'google.protobuf.python.internal.Factory2Message.NestedFactory2Message')) ++ self.assertEquals(file_desc_proto, db.FindFileContainingSymbol( ++ 'google.protobuf.python.internal.Factory2Enum')) ++ self.assertEquals(file_desc_proto, db.FindFileContainingSymbol( ++ 'google.protobuf.python.internal.Factory2Message.NestedFactory2Enum')) ++ ++if __name__ == '__main__': ++ basetest.main() +--- /dev/null ++++ protobuf-2.6.1/python3/google/protobuf/internal/descriptor_pool_test.py +@@ -0,0 +1,564 @@ ++#! 
/usr/bin/python ++# ++# Protocol Buffers - Google's data interchange format ++# Copyright 2008 Google Inc. All rights reserved. ++# https://developers.google.com/protocol-buffers/ ++# ++# Redistribution and use in source and binary forms, with or without ++# modification, are permitted provided that the following conditions are ++# met: ++# ++# * Redistributions of source code must retain the above copyright ++# notice, this list of conditions and the following disclaimer. ++# * Redistributions in binary form must reproduce the above ++# copyright notice, this list of conditions and the following disclaimer ++# in the documentation and/or other materials provided with the ++# distribution. ++# * Neither the name of Google Inc. nor the names of its ++# contributors may be used to endorse or promote products derived from ++# this software without specific prior written permission. ++# ++# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS ++# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT ++# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR ++# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT ++# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, ++# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT ++# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, ++# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY ++# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT ++# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE ++# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
++ ++"""Tests for google.protobuf.descriptor_pool.""" ++ ++__author__ = 'matthewtoia@google.com (Matt Toia)' ++ ++import os ++import unittest ++ ++from google.apputils import basetest ++from google.protobuf import unittest_pb2 ++from google.protobuf import descriptor_pb2 ++from google.protobuf.internal import api_implementation ++from google.protobuf.internal import descriptor_pool_test1_pb2 ++from google.protobuf.internal import descriptor_pool_test2_pb2 ++from google.protobuf.internal import factory_test1_pb2 ++from google.protobuf.internal import factory_test2_pb2 ++from google.protobuf import descriptor ++from google.protobuf import descriptor_database ++from google.protobuf import descriptor_pool ++ ++ ++class DescriptorPoolTest(basetest.TestCase): ++ ++ def setUp(self): ++ self.pool = descriptor_pool.DescriptorPool() ++ self.factory_test1_fd = descriptor_pb2.FileDescriptorProto.FromString( ++ factory_test1_pb2.DESCRIPTOR.serialized_pb) ++ self.factory_test2_fd = descriptor_pb2.FileDescriptorProto.FromString( ++ factory_test2_pb2.DESCRIPTOR.serialized_pb) ++ self.pool.Add(self.factory_test1_fd) ++ self.pool.Add(self.factory_test2_fd) ++ ++ def testFindFileByName(self): ++ name1 = 'google/protobuf/internal/factory_test1.proto' ++ file_desc1 = self.pool.FindFileByName(name1) ++ self.assertIsInstance(file_desc1, descriptor.FileDescriptor) ++ self.assertEquals(name1, file_desc1.name) ++ self.assertEquals('google.protobuf.python.internal', file_desc1.package) ++ self.assertIn('Factory1Message', file_desc1.message_types_by_name) ++ ++ name2 = 'google/protobuf/internal/factory_test2.proto' ++ file_desc2 = self.pool.FindFileByName(name2) ++ self.assertIsInstance(file_desc2, descriptor.FileDescriptor) ++ self.assertEquals(name2, file_desc2.name) ++ self.assertEquals('google.protobuf.python.internal', file_desc2.package) ++ self.assertIn('Factory2Message', file_desc2.message_types_by_name) ++ ++ def testFindFileByNameFailure(self): ++ with self.assertRaises(KeyError): 
++ self.pool.FindFileByName('Does not exist') ++ ++ def testFindFileContainingSymbol(self): ++ file_desc1 = self.pool.FindFileContainingSymbol( ++ 'google.protobuf.python.internal.Factory1Message') ++ self.assertIsInstance(file_desc1, descriptor.FileDescriptor) ++ self.assertEquals('google/protobuf/internal/factory_test1.proto', ++ file_desc1.name) ++ self.assertEquals('google.protobuf.python.internal', file_desc1.package) ++ self.assertIn('Factory1Message', file_desc1.message_types_by_name) ++ ++ file_desc2 = self.pool.FindFileContainingSymbol( ++ 'google.protobuf.python.internal.Factory2Message') ++ self.assertIsInstance(file_desc2, descriptor.FileDescriptor) ++ self.assertEquals('google/protobuf/internal/factory_test2.proto', ++ file_desc2.name) ++ self.assertEquals('google.protobuf.python.internal', file_desc2.package) ++ self.assertIn('Factory2Message', file_desc2.message_types_by_name) ++ ++ def testFindFileContainingSymbolFailure(self): ++ with self.assertRaises(KeyError): ++ self.pool.FindFileContainingSymbol('Does not exist') ++ ++ def testFindMessageTypeByName(self): ++ msg1 = self.pool.FindMessageTypeByName( ++ 'google.protobuf.python.internal.Factory1Message') ++ self.assertIsInstance(msg1, descriptor.Descriptor) ++ self.assertEquals('Factory1Message', msg1.name) ++ self.assertEquals('google.protobuf.python.internal.Factory1Message', ++ msg1.full_name) ++ self.assertEquals(None, msg1.containing_type) ++ ++ nested_msg1 = msg1.nested_types[0] ++ self.assertEquals('NestedFactory1Message', nested_msg1.name) ++ self.assertEquals(msg1, nested_msg1.containing_type) ++ ++ nested_enum1 = msg1.enum_types[0] ++ self.assertEquals('NestedFactory1Enum', nested_enum1.name) ++ self.assertEquals(msg1, nested_enum1.containing_type) ++ ++ self.assertEquals(nested_msg1, msg1.fields_by_name[ ++ 'nested_factory_1_message'].message_type) ++ self.assertEquals(nested_enum1, msg1.fields_by_name[ ++ 'nested_factory_1_enum'].enum_type) ++ ++ msg2 = self.pool.FindMessageTypeByName( 
++ 'google.protobuf.python.internal.Factory2Message') ++ self.assertIsInstance(msg2, descriptor.Descriptor) ++ self.assertEquals('Factory2Message', msg2.name) ++ self.assertEquals('google.protobuf.python.internal.Factory2Message', ++ msg2.full_name) ++ self.assertIsNone(msg2.containing_type) ++ ++ nested_msg2 = msg2.nested_types[0] ++ self.assertEquals('NestedFactory2Message', nested_msg2.name) ++ self.assertEquals(msg2, nested_msg2.containing_type) ++ ++ nested_enum2 = msg2.enum_types[0] ++ self.assertEquals('NestedFactory2Enum', nested_enum2.name) ++ self.assertEquals(msg2, nested_enum2.containing_type) ++ ++ self.assertEquals(nested_msg2, msg2.fields_by_name[ ++ 'nested_factory_2_message'].message_type) ++ self.assertEquals(nested_enum2, msg2.fields_by_name[ ++ 'nested_factory_2_enum'].enum_type) ++ ++ self.assertTrue(msg2.fields_by_name['int_with_default'].has_default_value) ++ self.assertEquals( ++ 1776, msg2.fields_by_name['int_with_default'].default_value) ++ ++ self.assertTrue( ++ msg2.fields_by_name['double_with_default'].has_default_value) ++ self.assertEquals( ++ 9.99, msg2.fields_by_name['double_with_default'].default_value) ++ ++ self.assertTrue( ++ msg2.fields_by_name['string_with_default'].has_default_value) ++ self.assertEquals( ++ 'hello world', msg2.fields_by_name['string_with_default'].default_value) ++ ++ self.assertTrue(msg2.fields_by_name['bool_with_default'].has_default_value) ++ self.assertFalse(msg2.fields_by_name['bool_with_default'].default_value) ++ ++ self.assertTrue(msg2.fields_by_name['enum_with_default'].has_default_value) ++ self.assertEquals( ++ 1, msg2.fields_by_name['enum_with_default'].default_value) ++ ++ msg3 = self.pool.FindMessageTypeByName( ++ 'google.protobuf.python.internal.Factory2Message.NestedFactory2Message') ++ self.assertEquals(nested_msg2, msg3) ++ ++ self.assertTrue(msg2.fields_by_name['bytes_with_default'].has_default_value) ++ self.assertEquals( ++ b'a\xfb\x00c', ++ 
msg2.fields_by_name['bytes_with_default'].default_value) ++ ++ self.assertEqual(1, len(msg2.oneofs)) ++ self.assertEqual(1, len(msg2.oneofs_by_name)) ++ self.assertEqual(2, len(msg2.oneofs[0].fields)) ++ for name in ['oneof_int', 'oneof_string']: ++ self.assertEqual(msg2.oneofs[0], ++ msg2.fields_by_name[name].containing_oneof) ++ self.assertIn(msg2.fields_by_name[name], msg2.oneofs[0].fields) ++ ++ def testFindMessageTypeByNameFailure(self): ++ with self.assertRaises(KeyError): ++ self.pool.FindMessageTypeByName('Does not exist') ++ ++ def testFindEnumTypeByName(self): ++ enum1 = self.pool.FindEnumTypeByName( ++ 'google.protobuf.python.internal.Factory1Enum') ++ self.assertIsInstance(enum1, descriptor.EnumDescriptor) ++ self.assertEquals(0, enum1.values_by_name['FACTORY_1_VALUE_0'].number) ++ self.assertEquals(1, enum1.values_by_name['FACTORY_1_VALUE_1'].number) ++ ++ nested_enum1 = self.pool.FindEnumTypeByName( ++ 'google.protobuf.python.internal.Factory1Message.NestedFactory1Enum') ++ self.assertIsInstance(nested_enum1, descriptor.EnumDescriptor) ++ self.assertEquals( ++ 0, nested_enum1.values_by_name['NESTED_FACTORY_1_VALUE_0'].number) ++ self.assertEquals( ++ 1, nested_enum1.values_by_name['NESTED_FACTORY_1_VALUE_1'].number) ++ ++ enum2 = self.pool.FindEnumTypeByName( ++ 'google.protobuf.python.internal.Factory2Enum') ++ self.assertIsInstance(enum2, descriptor.EnumDescriptor) ++ self.assertEquals(0, enum2.values_by_name['FACTORY_2_VALUE_0'].number) ++ self.assertEquals(1, enum2.values_by_name['FACTORY_2_VALUE_1'].number) ++ ++ nested_enum2 = self.pool.FindEnumTypeByName( ++ 'google.protobuf.python.internal.Factory2Message.NestedFactory2Enum') ++ self.assertIsInstance(nested_enum2, descriptor.EnumDescriptor) ++ self.assertEquals( ++ 0, nested_enum2.values_by_name['NESTED_FACTORY_2_VALUE_0'].number) ++ self.assertEquals( ++ 1, nested_enum2.values_by_name['NESTED_FACTORY_2_VALUE_1'].number) ++ ++ def testFindEnumTypeByNameFailure(self): ++ with 
self.assertRaises(KeyError): ++ self.pool.FindEnumTypeByName('Does not exist') ++ ++ def testUserDefinedDB(self): ++ db = descriptor_database.DescriptorDatabase() ++ self.pool = descriptor_pool.DescriptorPool(db) ++ db.Add(self.factory_test1_fd) ++ db.Add(self.factory_test2_fd) ++ self.testFindMessageTypeByName() ++ ++ def testComplexNesting(self): ++ test1_desc = descriptor_pb2.FileDescriptorProto.FromString( ++ descriptor_pool_test1_pb2.DESCRIPTOR.serialized_pb) ++ test2_desc = descriptor_pb2.FileDescriptorProto.FromString( ++ descriptor_pool_test2_pb2.DESCRIPTOR.serialized_pb) ++ self.pool.Add(test1_desc) ++ self.pool.Add(test2_desc) ++ TEST1_FILE.CheckFile(self, self.pool) ++ TEST2_FILE.CheckFile(self, self.pool) ++ ++ ++ ++class ProtoFile(object): ++ ++ def __init__(self, name, package, messages, dependencies=None): ++ self.name = name ++ self.package = package ++ self.messages = messages ++ self.dependencies = dependencies or [] ++ ++ def CheckFile(self, test, pool): ++ file_desc = pool.FindFileByName(self.name) ++ test.assertEquals(self.name, file_desc.name) ++ test.assertEquals(self.package, file_desc.package) ++ dependencies_names = [f.name for f in file_desc.dependencies] ++ test.assertEqual(self.dependencies, dependencies_names) ++ for name, msg_type in self.messages.items(): ++ msg_type.CheckType(test, None, name, file_desc) ++ ++ ++class EnumType(object): ++ ++ def __init__(self, values): ++ self.values = values ++ ++ def CheckType(self, test, msg_desc, name, file_desc): ++ enum_desc = msg_desc.enum_types_by_name[name] ++ test.assertEqual(name, enum_desc.name) ++ expected_enum_full_name = '.'.join([msg_desc.full_name, name]) ++ test.assertEqual(expected_enum_full_name, enum_desc.full_name) ++ test.assertEqual(msg_desc, enum_desc.containing_type) ++ test.assertEqual(file_desc, enum_desc.file) ++ for index, (value, number) in enumerate(self.values): ++ value_desc = enum_desc.values_by_name[value] ++ test.assertEqual(value, value_desc.name) ++ 
test.assertEqual(index, value_desc.index) ++ test.assertEqual(number, value_desc.number) ++ test.assertEqual(enum_desc, value_desc.type) ++ test.assertIn(value, msg_desc.enum_values_by_name) ++ ++ ++class MessageType(object): ++ ++ def __init__(self, type_dict, field_list, is_extendable=False, ++ extensions=None): ++ self.type_dict = type_dict ++ self.field_list = field_list ++ self.is_extendable = is_extendable ++ self.extensions = extensions or [] ++ ++ def CheckType(self, test, containing_type_desc, name, file_desc): ++ if containing_type_desc is None: ++ desc = file_desc.message_types_by_name[name] ++ expected_full_name = '.'.join([file_desc.package, name]) ++ else: ++ desc = containing_type_desc.nested_types_by_name[name] ++ expected_full_name = '.'.join([containing_type_desc.full_name, name]) ++ ++ test.assertEqual(name, desc.name) ++ test.assertEqual(expected_full_name, desc.full_name) ++ test.assertEqual(containing_type_desc, desc.containing_type) ++ test.assertEqual(desc.file, file_desc) ++ test.assertEqual(self.is_extendable, desc.is_extendable) ++ for name, subtype in self.type_dict.items(): ++ subtype.CheckType(test, desc, name, file_desc) ++ ++ for index, (name, field) in enumerate(self.field_list): ++ field.CheckField(test, desc, name, index) ++ ++ for index, (name, field) in enumerate(self.extensions): ++ field.CheckField(test, desc, name, index) ++ ++ ++class EnumField(object): ++ ++ def __init__(self, number, type_name, default_value): ++ self.number = number ++ self.type_name = type_name ++ self.default_value = default_value ++ ++ def CheckField(self, test, msg_desc, name, index): ++ field_desc = msg_desc.fields_by_name[name] ++ enum_desc = msg_desc.enum_types_by_name[self.type_name] ++ test.assertEqual(name, field_desc.name) ++ expected_field_full_name = '.'.join([msg_desc.full_name, name]) ++ test.assertEqual(expected_field_full_name, field_desc.full_name) ++ test.assertEqual(index, field_desc.index) ++ test.assertEqual(self.number, 
field_desc.number) ++ test.assertEqual(descriptor.FieldDescriptor.TYPE_ENUM, field_desc.type) ++ test.assertEqual(descriptor.FieldDescriptor.CPPTYPE_ENUM, ++ field_desc.cpp_type) ++ test.assertTrue(field_desc.has_default_value) ++ test.assertEqual(enum_desc.values_by_name[self.default_value].index, ++ field_desc.default_value) ++ test.assertEqual(msg_desc, field_desc.containing_type) ++ test.assertEqual(enum_desc, field_desc.enum_type) ++ ++ ++class MessageField(object): ++ ++ def __init__(self, number, type_name): ++ self.number = number ++ self.type_name = type_name ++ ++ def CheckField(self, test, msg_desc, name, index): ++ field_desc = msg_desc.fields_by_name[name] ++ field_type_desc = msg_desc.nested_types_by_name[self.type_name] ++ test.assertEqual(name, field_desc.name) ++ expected_field_full_name = '.'.join([msg_desc.full_name, name]) ++ test.assertEqual(expected_field_full_name, field_desc.full_name) ++ test.assertEqual(index, field_desc.index) ++ test.assertEqual(self.number, field_desc.number) ++ test.assertEqual(descriptor.FieldDescriptor.TYPE_MESSAGE, field_desc.type) ++ test.assertEqual(descriptor.FieldDescriptor.CPPTYPE_MESSAGE, ++ field_desc.cpp_type) ++ test.assertFalse(field_desc.has_default_value) ++ test.assertEqual(msg_desc, field_desc.containing_type) ++ test.assertEqual(field_type_desc, field_desc.message_type) ++ ++ ++class StringField(object): ++ ++ def __init__(self, number, default_value): ++ self.number = number ++ self.default_value = default_value ++ ++ def CheckField(self, test, msg_desc, name, index): ++ field_desc = msg_desc.fields_by_name[name] ++ test.assertEqual(name, field_desc.name) ++ expected_field_full_name = '.'.join([msg_desc.full_name, name]) ++ test.assertEqual(expected_field_full_name, field_desc.full_name) ++ test.assertEqual(index, field_desc.index) ++ test.assertEqual(self.number, field_desc.number) ++ test.assertEqual(descriptor.FieldDescriptor.TYPE_STRING, field_desc.type) ++ 
test.assertEqual(descriptor.FieldDescriptor.CPPTYPE_STRING, ++ field_desc.cpp_type) ++ test.assertTrue(field_desc.has_default_value) ++ test.assertEqual(self.default_value, field_desc.default_value) ++ ++ ++class ExtensionField(object): ++ ++ def __init__(self, number, extended_type): ++ self.number = number ++ self.extended_type = extended_type ++ ++ def CheckField(self, test, msg_desc, name, index): ++ field_desc = msg_desc.extensions_by_name[name] ++ test.assertEqual(name, field_desc.name) ++ expected_field_full_name = '.'.join([msg_desc.full_name, name]) ++ test.assertEqual(expected_field_full_name, field_desc.full_name) ++ test.assertEqual(self.number, field_desc.number) ++ test.assertEqual(index, field_desc.index) ++ test.assertEqual(descriptor.FieldDescriptor.TYPE_MESSAGE, field_desc.type) ++ test.assertEqual(descriptor.FieldDescriptor.CPPTYPE_MESSAGE, ++ field_desc.cpp_type) ++ test.assertFalse(field_desc.has_default_value) ++ test.assertTrue(field_desc.is_extension) ++ test.assertEqual(msg_desc, field_desc.extension_scope) ++ test.assertEqual(msg_desc, field_desc.message_type) ++ test.assertEqual(self.extended_type, field_desc.containing_type.name) ++ ++ ++class AddDescriptorTest(basetest.TestCase): ++ ++ def _TestMessage(self, prefix): ++ pool = descriptor_pool.DescriptorPool() ++ pool.AddDescriptor(unittest_pb2.TestAllTypes.DESCRIPTOR) ++ self.assertEquals( ++ 'protobuf_unittest.TestAllTypes', ++ pool.FindMessageTypeByName( ++ prefix + 'protobuf_unittest.TestAllTypes').full_name) ++ ++ # AddDescriptor is not recursive. 
++ with self.assertRaises(KeyError): ++ pool.FindMessageTypeByName( ++ prefix + 'protobuf_unittest.TestAllTypes.NestedMessage') ++ ++ pool.AddDescriptor(unittest_pb2.TestAllTypes.NestedMessage.DESCRIPTOR) ++ self.assertEquals( ++ 'protobuf_unittest.TestAllTypes.NestedMessage', ++ pool.FindMessageTypeByName( ++ prefix + 'protobuf_unittest.TestAllTypes.NestedMessage').full_name) ++ ++ # Files are implicitly also indexed when messages are added. ++ self.assertEquals( ++ 'google/protobuf/unittest.proto', ++ pool.FindFileByName( ++ 'google/protobuf/unittest.proto').name) ++ ++ self.assertEquals( ++ 'google/protobuf/unittest.proto', ++ pool.FindFileContainingSymbol( ++ prefix + 'protobuf_unittest.TestAllTypes.NestedMessage').name) ++ ++ def testMessage(self): ++ self._TestMessage('') ++ self._TestMessage('.') ++ ++ def _TestEnum(self, prefix): ++ pool = descriptor_pool.DescriptorPool() ++ pool.AddEnumDescriptor(unittest_pb2.ForeignEnum.DESCRIPTOR) ++ self.assertEquals( ++ 'protobuf_unittest.ForeignEnum', ++ pool.FindEnumTypeByName( ++ prefix + 'protobuf_unittest.ForeignEnum').full_name) ++ ++ # AddEnumDescriptor is not recursive. ++ with self.assertRaises(KeyError): ++ pool.FindEnumTypeByName( ++ prefix + 'protobuf_unittest.ForeignEnum.NestedEnum') ++ ++ pool.AddEnumDescriptor(unittest_pb2.TestAllTypes.NestedEnum.DESCRIPTOR) ++ self.assertEquals( ++ 'protobuf_unittest.TestAllTypes.NestedEnum', ++ pool.FindEnumTypeByName( ++ prefix + 'protobuf_unittest.TestAllTypes.NestedEnum').full_name) ++ ++ # Files are implicitly also indexed when enums are added. 
++ self.assertEquals( ++ 'google/protobuf/unittest.proto', ++ pool.FindFileByName( ++ 'google/protobuf/unittest.proto').name) ++ ++ self.assertEquals( ++ 'google/protobuf/unittest.proto', ++ pool.FindFileContainingSymbol( ++ prefix + 'protobuf_unittest.TestAllTypes.NestedEnum').name) ++ ++ def testEnum(self): ++ self._TestEnum('') ++ self._TestEnum('.') ++ ++ def testFile(self): ++ pool = descriptor_pool.DescriptorPool() ++ pool.AddFileDescriptor(unittest_pb2.DESCRIPTOR) ++ self.assertEquals( ++ 'google/protobuf/unittest.proto', ++ pool.FindFileByName( ++ 'google/protobuf/unittest.proto').name) ++ ++ # AddFileDescriptor is not recursive; messages and enums within files must ++ # be explicitly registered. ++ with self.assertRaises(KeyError): ++ pool.FindFileContainingSymbol( ++ 'protobuf_unittest.TestAllTypes') ++ ++ ++TEST1_FILE = ProtoFile( ++ 'google/protobuf/internal/descriptor_pool_test1.proto', ++ 'google.protobuf.python.internal', ++ { ++ 'DescriptorPoolTest1': MessageType({ ++ 'NestedEnum': EnumType([('ALPHA', 1), ('BETA', 2)]), ++ 'NestedMessage': MessageType({ ++ 'NestedEnum': EnumType([('EPSILON', 5), ('ZETA', 6)]), ++ 'DeepNestedMessage': MessageType({ ++ 'NestedEnum': EnumType([('ETA', 7), ('THETA', 8)]), ++ }, [ ++ ('nested_enum', EnumField(1, 'NestedEnum', 'ETA')), ++ ('nested_field', StringField(2, 'theta')), ++ ]), ++ }, [ ++ ('nested_enum', EnumField(1, 'NestedEnum', 'ZETA')), ++ ('nested_field', StringField(2, 'beta')), ++ ('deep_nested_message', MessageField(3, 'DeepNestedMessage')), ++ ]) ++ }, [ ++ ('nested_enum', EnumField(1, 'NestedEnum', 'BETA')), ++ ('nested_message', MessageField(2, 'NestedMessage')), ++ ], is_extendable=True), ++ ++ 'DescriptorPoolTest2': MessageType({ ++ 'NestedEnum': EnumType([('GAMMA', 3), ('DELTA', 4)]), ++ 'NestedMessage': MessageType({ ++ 'NestedEnum': EnumType([('IOTA', 9), ('KAPPA', 10)]), ++ 'DeepNestedMessage': MessageType({ ++ 'NestedEnum': EnumType([('LAMBDA', 11), ('MU', 12)]), ++ }, [ ++ ('nested_enum', 
EnumField(1, 'NestedEnum', 'MU')), ++ ('nested_field', StringField(2, 'lambda')), ++ ]), ++ }, [ ++ ('nested_enum', EnumField(1, 'NestedEnum', 'IOTA')), ++ ('nested_field', StringField(2, 'delta')), ++ ('deep_nested_message', MessageField(3, 'DeepNestedMessage')), ++ ]) ++ }, [ ++ ('nested_enum', EnumField(1, 'NestedEnum', 'GAMMA')), ++ ('nested_message', MessageField(2, 'NestedMessage')), ++ ]), ++ }) ++ ++ ++TEST2_FILE = ProtoFile( ++ 'google/protobuf/internal/descriptor_pool_test2.proto', ++ 'google.protobuf.python.internal', ++ { ++ 'DescriptorPoolTest3': MessageType({ ++ 'NestedEnum': EnumType([('NU', 13), ('XI', 14)]), ++ 'NestedMessage': MessageType({ ++ 'NestedEnum': EnumType([('OMICRON', 15), ('PI', 16)]), ++ 'DeepNestedMessage': MessageType({ ++ 'NestedEnum': EnumType([('RHO', 17), ('SIGMA', 18)]), ++ }, [ ++ ('nested_enum', EnumField(1, 'NestedEnum', 'RHO')), ++ ('nested_field', StringField(2, 'sigma')), ++ ]), ++ }, [ ++ ('nested_enum', EnumField(1, 'NestedEnum', 'PI')), ++ ('nested_field', StringField(2, 'nu')), ++ ('deep_nested_message', MessageField(3, 'DeepNestedMessage')), ++ ]) ++ }, [ ++ ('nested_enum', EnumField(1, 'NestedEnum', 'XI')), ++ ('nested_message', MessageField(2, 'NestedMessage')), ++ ], extensions=[ ++ ('descriptor_pool_test', ++ ExtensionField(1001, 'DescriptorPoolTest1')), ++ ]), ++ }, ++ dependencies=['google/protobuf/internal/descriptor_pool_test1.proto']) ++ ++ ++if __name__ == '__main__': ++ basetest.main() +--- /dev/null ++++ protobuf-2.6.1/python3/google/protobuf/internal/descriptor_pool_test1.proto +@@ -0,0 +1,94 @@ ++// Protocol Buffers - Google's data interchange format ++// Copyright 2008 Google Inc. All rights reserved. 
++// https://developers.google.com/protocol-buffers/ ++// ++// Redistribution and use in source and binary forms, with or without ++// modification, are permitted provided that the following conditions are ++// met: ++// ++// * Redistributions of source code must retain the above copyright ++// notice, this list of conditions and the following disclaimer. ++// * Redistributions in binary form must reproduce the above ++// copyright notice, this list of conditions and the following disclaimer ++// in the documentation and/or other materials provided with the ++// distribution. ++// * Neither the name of Google Inc. nor the names of its ++// contributors may be used to endorse or promote products derived from ++// this software without specific prior written permission. ++// ++// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS ++// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT ++// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR ++// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT ++// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, ++// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT ++// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, ++// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY ++// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT ++// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE ++// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
++ ++package google.protobuf.python.internal; ++ ++ ++message DescriptorPoolTest1 { ++ extensions 1000 to max; ++ ++ enum NestedEnum { ++ ALPHA = 1; ++ BETA = 2; ++ } ++ ++ optional NestedEnum nested_enum = 1 [default = BETA]; ++ ++ message NestedMessage { ++ enum NestedEnum { ++ EPSILON = 5; ++ ZETA = 6; ++ } ++ optional NestedEnum nested_enum = 1 [default = ZETA]; ++ optional string nested_field = 2 [default = "beta"]; ++ optional DeepNestedMessage deep_nested_message = 3; ++ ++ message DeepNestedMessage { ++ enum NestedEnum { ++ ETA = 7; ++ THETA = 8; ++ } ++ optional NestedEnum nested_enum = 1 [default = ETA]; ++ optional string nested_field = 2 [default = "theta"]; ++ } ++ } ++ ++ optional NestedMessage nested_message = 2; ++} ++ ++message DescriptorPoolTest2 { ++ enum NestedEnum { ++ GAMMA = 3; ++ DELTA = 4; ++ } ++ ++ optional NestedEnum nested_enum = 1 [default = GAMMA]; ++ ++ message NestedMessage { ++ enum NestedEnum { ++ IOTA = 9; ++ KAPPA = 10; ++ } ++ optional NestedEnum nested_enum = 1 [default = IOTA]; ++ optional string nested_field = 2 [default = "delta"]; ++ optional DeepNestedMessage deep_nested_message = 3; ++ ++ message DeepNestedMessage { ++ enum NestedEnum { ++ LAMBDA = 11; ++ MU = 12; ++ } ++ optional NestedEnum nested_enum = 1 [default = MU]; ++ optional string nested_field = 2 [default = "lambda"]; ++ } ++ } ++ ++ optional NestedMessage nested_message = 2; ++} +--- /dev/null ++++ protobuf-2.6.1/python3/google/protobuf/internal/descriptor_pool_test2.proto +@@ -0,0 +1,70 @@ ++// Protocol Buffers - Google's data interchange format ++// Copyright 2008 Google Inc. All rights reserved. ++// https://developers.google.com/protocol-buffers/ ++// ++// Redistribution and use in source and binary forms, with or without ++// modification, are permitted provided that the following conditions are ++// met: ++// ++// * Redistributions of source code must retain the above copyright ++// notice, this list of conditions and the following disclaimer. 
++// * Redistributions in binary form must reproduce the above ++// copyright notice, this list of conditions and the following disclaimer ++// in the documentation and/or other materials provided with the ++// distribution. ++// * Neither the name of Google Inc. nor the names of its ++// contributors may be used to endorse or promote products derived from ++// this software without specific prior written permission. ++// ++// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS ++// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT ++// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR ++// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT ++// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, ++// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT ++// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, ++// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY ++// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT ++// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE ++// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
++ ++package google.protobuf.python.internal; ++ ++import "google/protobuf/internal/descriptor_pool_test1.proto"; ++ ++ ++message DescriptorPoolTest3 { ++ ++ extend DescriptorPoolTest1 { ++ optional DescriptorPoolTest3 descriptor_pool_test = 1001; ++ } ++ ++ enum NestedEnum { ++ NU = 13; ++ XI = 14; ++ } ++ ++ optional NestedEnum nested_enum = 1 [default = XI]; ++ ++ message NestedMessage { ++ enum NestedEnum { ++ OMICRON = 15; ++ PI = 16; ++ } ++ optional NestedEnum nested_enum = 1 [default = PI]; ++ optional string nested_field = 2 [default = "nu"]; ++ optional DeepNestedMessage deep_nested_message = 3; ++ ++ message DeepNestedMessage { ++ enum NestedEnum { ++ RHO = 17; ++ SIGMA = 18; ++ } ++ optional NestedEnum nested_enum = 1 [default = RHO]; ++ optional string nested_field = 2 [default = "sigma"]; ++ } ++ } ++ ++ optional NestedMessage nested_message = 2; ++} ++ +--- /dev/null ++++ protobuf-2.6.1/python3/google/protobuf/internal/descriptor_python_test.py +@@ -0,0 +1,54 @@ ++#! /usr/bin/python ++# ++# Protocol Buffers - Google's data interchange format ++# Copyright 2008 Google Inc. All rights reserved. ++# https://developers.google.com/protocol-buffers/ ++# ++# Redistribution and use in source and binary forms, with or without ++# modification, are permitted provided that the following conditions are ++# met: ++# ++# * Redistributions of source code must retain the above copyright ++# notice, this list of conditions and the following disclaimer. ++# * Redistributions in binary form must reproduce the above ++# copyright notice, this list of conditions and the following disclaimer ++# in the documentation and/or other materials provided with the ++# distribution. ++# * Neither the name of Google Inc. nor the names of its ++# contributors may be used to endorse or promote products derived from ++# this software without specific prior written permission. 
++# ++# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS ++# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT ++# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR ++# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT ++# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, ++# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT ++# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, ++# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY ++# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT ++# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE ++# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. ++ ++"""Unittest for descriptor.py for the pure Python implementation.""" ++ ++import os ++os.environ['PROTOCOL_BUFFERS_PYTHON_IMPLEMENTATION'] = 'python' ++ ++# We must set the implementation version above before the google3 imports. ++# pylint: disable=g-import-not-at-top ++from google.apputils import basetest ++from google.protobuf.internal import api_implementation ++# Run all tests from the original module by putting them in our namespace. ++# pylint: disable=wildcard-import ++from google.protobuf.internal.descriptor_test import * ++ ++ ++class ConfirmPurePythonTest(basetest.TestCase): ++ ++ def testImplementationSetting(self): ++ self.assertEqual('python', api_implementation.Type()) ++ ++ ++if __name__ == '__main__': ++ basetest.main() +--- /dev/null ++++ protobuf-2.6.1/python3/google/protobuf/internal/descriptor_test.py +@@ -0,0 +1,669 @@ ++#! /usr/bin/python ++# ++# Protocol Buffers - Google's data interchange format ++# Copyright 2008 Google Inc. All rights reserved. 
++# https://developers.google.com/protocol-buffers/ ++# ++# Redistribution and use in source and binary forms, with or without ++# modification, are permitted provided that the following conditions are ++# met: ++# ++# * Redistributions of source code must retain the above copyright ++# notice, this list of conditions and the following disclaimer. ++# * Redistributions in binary form must reproduce the above ++# copyright notice, this list of conditions and the following disclaimer ++# in the documentation and/or other materials provided with the ++# distribution. ++# * Neither the name of Google Inc. nor the names of its ++# contributors may be used to endorse or promote products derived from ++# this software without specific prior written permission. ++# ++# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS ++# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT ++# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR ++# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT ++# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, ++# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT ++# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, ++# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY ++# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT ++# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE ++# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
++ ++"""Unittest for google.protobuf.internal.descriptor.""" ++ ++__author__ = 'robinson@google.com (Will Robinson)' ++ ++from google.apputils import basetest ++from google.protobuf import unittest_custom_options_pb2 ++from google.protobuf import unittest_import_pb2 ++from google.protobuf import unittest_pb2 ++from google.protobuf import descriptor_pb2 ++from google.protobuf import descriptor ++from google.protobuf import text_format ++ ++ ++TEST_EMPTY_MESSAGE_DESCRIPTOR_ASCII = """ ++name: 'TestEmptyMessage' ++""" ++ ++ ++class DescriptorTest(basetest.TestCase): ++ ++ def setUp(self): ++ self.my_file = descriptor.FileDescriptor( ++ name='some/filename/some.proto', ++ package='protobuf_unittest' ++ ) ++ self.my_enum = descriptor.EnumDescriptor( ++ name='ForeignEnum', ++ full_name='protobuf_unittest.ForeignEnum', ++ filename=None, ++ file=self.my_file, ++ values=[ ++ descriptor.EnumValueDescriptor(name='FOREIGN_FOO', index=0, number=4), ++ descriptor.EnumValueDescriptor(name='FOREIGN_BAR', index=1, number=5), ++ descriptor.EnumValueDescriptor(name='FOREIGN_BAZ', index=2, number=6), ++ ]) ++ self.my_message = descriptor.Descriptor( ++ name='NestedMessage', ++ full_name='protobuf_unittest.TestAllTypes.NestedMessage', ++ filename=None, ++ file=self.my_file, ++ containing_type=None, ++ fields=[ ++ descriptor.FieldDescriptor( ++ name='bb', ++ full_name='protobuf_unittest.TestAllTypes.NestedMessage.bb', ++ index=0, number=1, ++ type=5, cpp_type=1, label=1, ++ has_default_value=False, default_value=0, ++ message_type=None, enum_type=None, containing_type=None, ++ is_extension=False, extension_scope=None), ++ ], ++ nested_types=[], ++ enum_types=[ ++ self.my_enum, ++ ], ++ extensions=[]) ++ self.my_method = descriptor.MethodDescriptor( ++ name='Bar', ++ full_name='protobuf_unittest.TestService.Bar', ++ index=0, ++ containing_service=None, ++ input_type=None, ++ output_type=None) ++ self.my_service = descriptor.ServiceDescriptor( ++ name='TestServiceWithOptions', ++ 
full_name='protobuf_unittest.TestServiceWithOptions', ++ file=self.my_file, ++ index=0, ++ methods=[ ++ self.my_method ++ ]) ++ ++ def testEnumValueName(self): ++ self.assertEqual(self.my_message.EnumValueName('ForeignEnum', 4), ++ 'FOREIGN_FOO') ++ ++ self.assertEqual( ++ self.my_message.enum_types_by_name[ ++ 'ForeignEnum'].values_by_number[4].name, ++ self.my_message.EnumValueName('ForeignEnum', 4)) ++ ++ def testEnumFixups(self): ++ self.assertEqual(self.my_enum, self.my_enum.values[0].type) ++ ++ def testContainingTypeFixups(self): ++ self.assertEqual(self.my_message, self.my_message.fields[0].containing_type) ++ self.assertEqual(self.my_message, self.my_enum.containing_type) ++ ++ def testContainingServiceFixups(self): ++ self.assertEqual(self.my_service, self.my_method.containing_service) ++ ++ def testGetOptions(self): ++ self.assertEqual(self.my_enum.GetOptions(), ++ descriptor_pb2.EnumOptions()) ++ self.assertEqual(self.my_enum.values[0].GetOptions(), ++ descriptor_pb2.EnumValueOptions()) ++ self.assertEqual(self.my_message.GetOptions(), ++ descriptor_pb2.MessageOptions()) ++ self.assertEqual(self.my_message.fields[0].GetOptions(), ++ descriptor_pb2.FieldOptions()) ++ self.assertEqual(self.my_method.GetOptions(), ++ descriptor_pb2.MethodOptions()) ++ self.assertEqual(self.my_service.GetOptions(), ++ descriptor_pb2.ServiceOptions()) ++ ++ def testSimpleCustomOptions(self): ++ file_descriptor = unittest_custom_options_pb2.DESCRIPTOR ++ message_descriptor =\ ++ unittest_custom_options_pb2.TestMessageWithCustomOptions.DESCRIPTOR ++ field_descriptor = message_descriptor.fields_by_name["field1"] ++ enum_descriptor = message_descriptor.enum_types_by_name["AnEnum"] ++ enum_value_descriptor =\ ++ message_descriptor.enum_values_by_name["ANENUM_VAL2"] ++ service_descriptor =\ ++ unittest_custom_options_pb2.TestServiceWithCustomOptions.DESCRIPTOR ++ method_descriptor = service_descriptor.FindMethodByName("Foo") ++ ++ file_options = file_descriptor.GetOptions() ++ 
file_opt1 = unittest_custom_options_pb2.file_opt1 ++ self.assertEqual(9876543210, file_options.Extensions[file_opt1]) ++ message_options = message_descriptor.GetOptions() ++ message_opt1 = unittest_custom_options_pb2.message_opt1 ++ self.assertEqual(-56, message_options.Extensions[message_opt1]) ++ field_options = field_descriptor.GetOptions() ++ field_opt1 = unittest_custom_options_pb2.field_opt1 ++ self.assertEqual(8765432109, field_options.Extensions[field_opt1]) ++ field_opt2 = unittest_custom_options_pb2.field_opt2 ++ self.assertEqual(42, field_options.Extensions[field_opt2]) ++ enum_options = enum_descriptor.GetOptions() ++ enum_opt1 = unittest_custom_options_pb2.enum_opt1 ++ self.assertEqual(-789, enum_options.Extensions[enum_opt1]) ++ enum_value_options = enum_value_descriptor.GetOptions() ++ enum_value_opt1 = unittest_custom_options_pb2.enum_value_opt1 ++ self.assertEqual(123, enum_value_options.Extensions[enum_value_opt1]) ++ ++ service_options = service_descriptor.GetOptions() ++ service_opt1 = unittest_custom_options_pb2.service_opt1 ++ self.assertEqual(-9876543210, service_options.Extensions[service_opt1]) ++ method_options = method_descriptor.GetOptions() ++ method_opt1 = unittest_custom_options_pb2.method_opt1 ++ self.assertEqual(unittest_custom_options_pb2.METHODOPT1_VAL2, ++ method_options.Extensions[method_opt1]) ++ ++ def testDifferentCustomOptionTypes(self): ++ kint32min = -2**31 ++ kint64min = -2**63 ++ kint32max = 2**31 - 1 ++ kint64max = 2**63 - 1 ++ kuint32max = 2**32 - 1 ++ kuint64max = 2**64 - 1 ++ ++ message_descriptor =\ ++ unittest_custom_options_pb2.CustomOptionMinIntegerValues.DESCRIPTOR ++ message_options = message_descriptor.GetOptions() ++ self.assertEqual(False, message_options.Extensions[ ++ unittest_custom_options_pb2.bool_opt]) ++ self.assertEqual(kint32min, message_options.Extensions[ ++ unittest_custom_options_pb2.int32_opt]) ++ self.assertEqual(kint64min, message_options.Extensions[ ++ unittest_custom_options_pb2.int64_opt]) 
++ self.assertEqual(0, message_options.Extensions[ ++ unittest_custom_options_pb2.uint32_opt]) ++ self.assertEqual(0, message_options.Extensions[ ++ unittest_custom_options_pb2.uint64_opt]) ++ self.assertEqual(kint32min, message_options.Extensions[ ++ unittest_custom_options_pb2.sint32_opt]) ++ self.assertEqual(kint64min, message_options.Extensions[ ++ unittest_custom_options_pb2.sint64_opt]) ++ self.assertEqual(0, message_options.Extensions[ ++ unittest_custom_options_pb2.fixed32_opt]) ++ self.assertEqual(0, message_options.Extensions[ ++ unittest_custom_options_pb2.fixed64_opt]) ++ self.assertEqual(kint32min, message_options.Extensions[ ++ unittest_custom_options_pb2.sfixed32_opt]) ++ self.assertEqual(kint64min, message_options.Extensions[ ++ unittest_custom_options_pb2.sfixed64_opt]) ++ ++ message_descriptor =\ ++ unittest_custom_options_pb2.CustomOptionMaxIntegerValues.DESCRIPTOR ++ message_options = message_descriptor.GetOptions() ++ self.assertEqual(True, message_options.Extensions[ ++ unittest_custom_options_pb2.bool_opt]) ++ self.assertEqual(kint32max, message_options.Extensions[ ++ unittest_custom_options_pb2.int32_opt]) ++ self.assertEqual(kint64max, message_options.Extensions[ ++ unittest_custom_options_pb2.int64_opt]) ++ self.assertEqual(kuint32max, message_options.Extensions[ ++ unittest_custom_options_pb2.uint32_opt]) ++ self.assertEqual(kuint64max, message_options.Extensions[ ++ unittest_custom_options_pb2.uint64_opt]) ++ self.assertEqual(kint32max, message_options.Extensions[ ++ unittest_custom_options_pb2.sint32_opt]) ++ self.assertEqual(kint64max, message_options.Extensions[ ++ unittest_custom_options_pb2.sint64_opt]) ++ self.assertEqual(kuint32max, message_options.Extensions[ ++ unittest_custom_options_pb2.fixed32_opt]) ++ self.assertEqual(kuint64max, message_options.Extensions[ ++ unittest_custom_options_pb2.fixed64_opt]) ++ self.assertEqual(kint32max, message_options.Extensions[ ++ unittest_custom_options_pb2.sfixed32_opt]) ++ 
self.assertEqual(kint64max, message_options.Extensions[ ++ unittest_custom_options_pb2.sfixed64_opt]) ++ ++ message_descriptor =\ ++ unittest_custom_options_pb2.CustomOptionOtherValues.DESCRIPTOR ++ message_options = message_descriptor.GetOptions() ++ self.assertEqual(-100, message_options.Extensions[ ++ unittest_custom_options_pb2.int32_opt]) ++ self.assertAlmostEqual(12.3456789, message_options.Extensions[ ++ unittest_custom_options_pb2.float_opt], 6) ++ self.assertAlmostEqual(1.234567890123456789, message_options.Extensions[ ++ unittest_custom_options_pb2.double_opt]) ++ self.assertEqual("Hello, \"World\"", message_options.Extensions[ ++ unittest_custom_options_pb2.string_opt]) ++ self.assertEqual(b"Hello\0World", message_options.Extensions[ ++ unittest_custom_options_pb2.bytes_opt]) ++ dummy_enum = unittest_custom_options_pb2.DummyMessageContainingEnum ++ self.assertEqual( ++ dummy_enum.TEST_OPTION_ENUM_TYPE2, ++ message_options.Extensions[unittest_custom_options_pb2.enum_opt]) ++ ++ message_descriptor =\ ++ unittest_custom_options_pb2.SettingRealsFromPositiveInts.DESCRIPTOR ++ message_options = message_descriptor.GetOptions() ++ self.assertAlmostEqual(12, message_options.Extensions[ ++ unittest_custom_options_pb2.float_opt], 6) ++ self.assertAlmostEqual(154, message_options.Extensions[ ++ unittest_custom_options_pb2.double_opt]) ++ ++ message_descriptor =\ ++ unittest_custom_options_pb2.SettingRealsFromNegativeInts.DESCRIPTOR ++ message_options = message_descriptor.GetOptions() ++ self.assertAlmostEqual(-12, message_options.Extensions[ ++ unittest_custom_options_pb2.float_opt], 6) ++ self.assertAlmostEqual(-154, message_options.Extensions[ ++ unittest_custom_options_pb2.double_opt]) ++ ++ def testComplexExtensionOptions(self): ++ descriptor =\ ++ unittest_custom_options_pb2.VariousComplexOptions.DESCRIPTOR ++ options = descriptor.GetOptions() ++ self.assertEqual(42, options.Extensions[ ++ unittest_custom_options_pb2.complex_opt1].foo) ++ self.assertEqual(324, 
options.Extensions[ ++ unittest_custom_options_pb2.complex_opt1].Extensions[ ++ unittest_custom_options_pb2.quux]) ++ self.assertEqual(876, options.Extensions[ ++ unittest_custom_options_pb2.complex_opt1].Extensions[ ++ unittest_custom_options_pb2.corge].qux) ++ self.assertEqual(987, options.Extensions[ ++ unittest_custom_options_pb2.complex_opt2].baz) ++ self.assertEqual(654, options.Extensions[ ++ unittest_custom_options_pb2.complex_opt2].Extensions[ ++ unittest_custom_options_pb2.grault]) ++ self.assertEqual(743, options.Extensions[ ++ unittest_custom_options_pb2.complex_opt2].bar.foo) ++ self.assertEqual(1999, options.Extensions[ ++ unittest_custom_options_pb2.complex_opt2].bar.Extensions[ ++ unittest_custom_options_pb2.quux]) ++ self.assertEqual(2008, options.Extensions[ ++ unittest_custom_options_pb2.complex_opt2].bar.Extensions[ ++ unittest_custom_options_pb2.corge].qux) ++ self.assertEqual(741, options.Extensions[ ++ unittest_custom_options_pb2.complex_opt2].Extensions[ ++ unittest_custom_options_pb2.garply].foo) ++ self.assertEqual(1998, options.Extensions[ ++ unittest_custom_options_pb2.complex_opt2].Extensions[ ++ unittest_custom_options_pb2.garply].Extensions[ ++ unittest_custom_options_pb2.quux]) ++ self.assertEqual(2121, options.Extensions[ ++ unittest_custom_options_pb2.complex_opt2].Extensions[ ++ unittest_custom_options_pb2.garply].Extensions[ ++ unittest_custom_options_pb2.corge].qux) ++ self.assertEqual(1971, options.Extensions[ ++ unittest_custom_options_pb2.ComplexOptionType2 ++ .ComplexOptionType4.complex_opt4].waldo) ++ self.assertEqual(321, options.Extensions[ ++ unittest_custom_options_pb2.complex_opt2].fred.waldo) ++ self.assertEqual(9, options.Extensions[ ++ unittest_custom_options_pb2.complex_opt3].qux) ++ self.assertEqual(22, options.Extensions[ ++ unittest_custom_options_pb2.complex_opt3].complexoptiontype5.plugh) ++ self.assertEqual(24, options.Extensions[ ++ unittest_custom_options_pb2.complexopt6].xyzzy) ++ ++ # Check that aggregate 
options were parsed and saved correctly in ++ # the appropriate descriptors. ++ def testAggregateOptions(self): ++ file_descriptor = unittest_custom_options_pb2.DESCRIPTOR ++ message_descriptor =\ ++ unittest_custom_options_pb2.AggregateMessage.DESCRIPTOR ++ field_descriptor = message_descriptor.fields_by_name["fieldname"] ++ enum_descriptor = unittest_custom_options_pb2.AggregateEnum.DESCRIPTOR ++ enum_value_descriptor = enum_descriptor.values_by_name["VALUE"] ++ service_descriptor =\ ++ unittest_custom_options_pb2.AggregateService.DESCRIPTOR ++ method_descriptor = service_descriptor.FindMethodByName("Method") ++ ++ # Tests for the different types of data embedded in fileopt ++ file_options = file_descriptor.GetOptions().Extensions[ ++ unittest_custom_options_pb2.fileopt] ++ self.assertEqual(100, file_options.i) ++ self.assertEqual("FileAnnotation", file_options.s) ++ self.assertEqual("NestedFileAnnotation", file_options.sub.s) ++ self.assertEqual("FileExtensionAnnotation", file_options.file.Extensions[ ++ unittest_custom_options_pb2.fileopt].s) ++ self.assertEqual("EmbeddedMessageSetElement", file_options.mset.Extensions[ ++ unittest_custom_options_pb2.AggregateMessageSetElement ++ .message_set_extension].s) ++ ++ # Simple tests for all the other types of annotations ++ self.assertEqual( ++ "MessageAnnotation", ++ message_descriptor.GetOptions().Extensions[ ++ unittest_custom_options_pb2.msgopt].s) ++ self.assertEqual( ++ "FieldAnnotation", ++ field_descriptor.GetOptions().Extensions[ ++ unittest_custom_options_pb2.fieldopt].s) ++ self.assertEqual( ++ "EnumAnnotation", ++ enum_descriptor.GetOptions().Extensions[ ++ unittest_custom_options_pb2.enumopt].s) ++ self.assertEqual( ++ "EnumValueAnnotation", ++ enum_value_descriptor.GetOptions().Extensions[ ++ unittest_custom_options_pb2.enumvalopt].s) ++ self.assertEqual( ++ "ServiceAnnotation", ++ service_descriptor.GetOptions().Extensions[ ++ unittest_custom_options_pb2.serviceopt].s) ++ self.assertEqual( ++ 
"MethodAnnotation", ++ method_descriptor.GetOptions().Extensions[ ++ unittest_custom_options_pb2.methodopt].s) ++ ++ def testNestedOptions(self): ++ nested_message =\ ++ unittest_custom_options_pb2.NestedOptionType.NestedMessage.DESCRIPTOR ++ self.assertEqual(1001, nested_message.GetOptions().Extensions[ ++ unittest_custom_options_pb2.message_opt1]) ++ nested_field = nested_message.fields_by_name["nested_field"] ++ self.assertEqual(1002, nested_field.GetOptions().Extensions[ ++ unittest_custom_options_pb2.field_opt1]) ++ outer_message =\ ++ unittest_custom_options_pb2.NestedOptionType.DESCRIPTOR ++ nested_enum = outer_message.enum_types_by_name["NestedEnum"] ++ self.assertEqual(1003, nested_enum.GetOptions().Extensions[ ++ unittest_custom_options_pb2.enum_opt1]) ++ nested_enum_value = outer_message.enum_values_by_name["NESTED_ENUM_VALUE"] ++ self.assertEqual(1004, nested_enum_value.GetOptions().Extensions[ ++ unittest_custom_options_pb2.enum_value_opt1]) ++ nested_extension = outer_message.extensions_by_name["nested_extension"] ++ self.assertEqual(1005, nested_extension.GetOptions().Extensions[ ++ unittest_custom_options_pb2.field_opt2]) ++ ++ def testFileDescriptorReferences(self): ++ self.assertEqual(self.my_enum.file, self.my_file) ++ self.assertEqual(self.my_message.file, self.my_file) ++ ++ def testFileDescriptor(self): ++ self.assertEqual(self.my_file.name, 'some/filename/some.proto') ++ self.assertEqual(self.my_file.package, 'protobuf_unittest') ++ ++ ++class DescriptorCopyToProtoTest(basetest.TestCase): ++ """Tests for CopyTo functions of Descriptor.""" ++ ++ def _AssertProtoEqual(self, actual_proto, expected_class, expected_ascii): ++ expected_proto = expected_class() ++ text_format.Merge(expected_ascii, expected_proto) ++ ++ self.assertEqual( ++ actual_proto, expected_proto, ++ 'Not equal,\nActual:\n%s\nExpected:\n%s\n' ++ % (str(actual_proto), str(expected_proto))) ++ ++ def _InternalTestCopyToProto(self, desc, expected_proto_class, ++ 
expected_proto_ascii): ++ actual = expected_proto_class() ++ desc.CopyToProto(actual) ++ self._AssertProtoEqual( ++ actual, expected_proto_class, expected_proto_ascii) ++ ++ def testCopyToProto_EmptyMessage(self): ++ self._InternalTestCopyToProto( ++ unittest_pb2.TestEmptyMessage.DESCRIPTOR, ++ descriptor_pb2.DescriptorProto, ++ TEST_EMPTY_MESSAGE_DESCRIPTOR_ASCII) ++ ++ def testCopyToProto_NestedMessage(self): ++ TEST_NESTED_MESSAGE_ASCII = """ ++ name: 'NestedMessage' ++ field: < ++ name: 'bb' ++ number: 1 ++ label: 1 # Optional ++ type: 5 # TYPE_INT32 ++ > ++ """ ++ ++ self._InternalTestCopyToProto( ++ unittest_pb2.TestAllTypes.NestedMessage.DESCRIPTOR, ++ descriptor_pb2.DescriptorProto, ++ TEST_NESTED_MESSAGE_ASCII) ++ ++ def testCopyToProto_ForeignNestedMessage(self): ++ TEST_FOREIGN_NESTED_ASCII = """ ++ name: 'TestForeignNested' ++ field: < ++ name: 'foreign_nested' ++ number: 1 ++ label: 1 # Optional ++ type: 11 # TYPE_MESSAGE ++ type_name: '.protobuf_unittest.TestAllTypes.NestedMessage' ++ > ++ """ ++ ++ self._InternalTestCopyToProto( ++ unittest_pb2.TestForeignNested.DESCRIPTOR, ++ descriptor_pb2.DescriptorProto, ++ TEST_FOREIGN_NESTED_ASCII) ++ ++ def testCopyToProto_ForeignEnum(self): ++ TEST_FOREIGN_ENUM_ASCII = """ ++ name: 'ForeignEnum' ++ value: < ++ name: 'FOREIGN_FOO' ++ number: 4 ++ > ++ value: < ++ name: 'FOREIGN_BAR' ++ number: 5 ++ > ++ value: < ++ name: 'FOREIGN_BAZ' ++ number: 6 ++ > ++ """ ++ ++ self._InternalTestCopyToProto( ++ unittest_pb2._FOREIGNENUM, ++ descriptor_pb2.EnumDescriptorProto, ++ TEST_FOREIGN_ENUM_ASCII) ++ ++ def testCopyToProto_Options(self): ++ TEST_DEPRECATED_FIELDS_ASCII = """ ++ name: 'TestDeprecatedFields' ++ field: < ++ name: 'deprecated_int32' ++ number: 1 ++ label: 1 # Optional ++ type: 5 # TYPE_INT32 ++ options: < ++ deprecated: true ++ > ++ > ++ """ ++ ++ self._InternalTestCopyToProto( ++ unittest_pb2.TestDeprecatedFields.DESCRIPTOR, ++ descriptor_pb2.DescriptorProto, ++ TEST_DEPRECATED_FIELDS_ASCII) ++ ++ def 
testCopyToProto_AllExtensions(self): ++ TEST_EMPTY_MESSAGE_WITH_EXTENSIONS_ASCII = """ ++ name: 'TestEmptyMessageWithExtensions' ++ extension_range: < ++ start: 1 ++ end: 536870912 ++ > ++ """ ++ ++ self._InternalTestCopyToProto( ++ unittest_pb2.TestEmptyMessageWithExtensions.DESCRIPTOR, ++ descriptor_pb2.DescriptorProto, ++ TEST_EMPTY_MESSAGE_WITH_EXTENSIONS_ASCII) ++ ++ def testCopyToProto_SeveralExtensions(self): ++ TEST_MESSAGE_WITH_SEVERAL_EXTENSIONS_ASCII = """ ++ name: 'TestMultipleExtensionRanges' ++ extension_range: < ++ start: 42 ++ end: 43 ++ > ++ extension_range: < ++ start: 4143 ++ end: 4244 ++ > ++ extension_range: < ++ start: 65536 ++ end: 536870912 ++ > ++ """ ++ ++ self._InternalTestCopyToProto( ++ unittest_pb2.TestMultipleExtensionRanges.DESCRIPTOR, ++ descriptor_pb2.DescriptorProto, ++ TEST_MESSAGE_WITH_SEVERAL_EXTENSIONS_ASCII) ++ ++ # Disable this test so we can make changes to the proto file. ++ # TODO(xiaofeng): Enable this test after cl/55530659 is submitted. ++ # ++ # def testCopyToProto_FileDescriptor(self): ++ # UNITTEST_IMPORT_FILE_DESCRIPTOR_ASCII = (""" ++ # name: 'google/protobuf/unittest_import.proto' ++ # package: 'protobuf_unittest_import' ++ # dependency: 'google/protobuf/unittest_import_public.proto' ++ # message_type: < ++ # name: 'ImportMessage' ++ # field: < ++ # name: 'd' ++ # number: 1 ++ # label: 1 # Optional ++ # type: 5 # TYPE_INT32 ++ # > ++ # > ++ # """ + ++ # """enum_type: < ++ # name: 'ImportEnum' ++ # value: < ++ # name: 'IMPORT_FOO' ++ # number: 7 ++ # > ++ # value: < ++ # name: 'IMPORT_BAR' ++ # number: 8 ++ # > ++ # value: < ++ # name: 'IMPORT_BAZ' ++ # number: 9 ++ # > ++ # > ++ # options: < ++ # java_package: 'com.google.protobuf.test' ++ # optimize_for: 1 # SPEED ++ # > ++ # public_dependency: 0 ++ # """) ++ # self._InternalTestCopyToProto( ++ # unittest_import_pb2.DESCRIPTOR, ++ # descriptor_pb2.FileDescriptorProto, ++ # UNITTEST_IMPORT_FILE_DESCRIPTOR_ASCII) ++ ++ def testCopyToProto_ServiceDescriptor(self): 
++ TEST_SERVICE_ASCII = """ ++ name: 'TestService' ++ method: < ++ name: 'Foo' ++ input_type: '.protobuf_unittest.FooRequest' ++ output_type: '.protobuf_unittest.FooResponse' ++ > ++ method: < ++ name: 'Bar' ++ input_type: '.protobuf_unittest.BarRequest' ++ output_type: '.protobuf_unittest.BarResponse' ++ > ++ """ ++ self._InternalTestCopyToProto( ++ unittest_pb2.TestService.DESCRIPTOR, ++ descriptor_pb2.ServiceDescriptorProto, ++ TEST_SERVICE_ASCII) ++ ++ ++class MakeDescriptorTest(basetest.TestCase): ++ ++ def testMakeDescriptorWithNestedFields(self): ++ file_descriptor_proto = descriptor_pb2.FileDescriptorProto() ++ file_descriptor_proto.name = 'Foo2' ++ message_type = file_descriptor_proto.message_type.add() ++ message_type.name = file_descriptor_proto.name ++ nested_type = message_type.nested_type.add() ++ nested_type.name = 'Sub' ++ enum_type = nested_type.enum_type.add() ++ enum_type.name = 'FOO' ++ enum_type_val = enum_type.value.add() ++ enum_type_val.name = 'BAR' ++ enum_type_val.number = 3 ++ field = message_type.field.add() ++ field.number = 1 ++ field.name = 'uint64_field' ++ field.label = descriptor.FieldDescriptor.LABEL_REQUIRED ++ field.type = descriptor.FieldDescriptor.TYPE_UINT64 ++ field = message_type.field.add() ++ field.number = 2 ++ field.name = 'nested_message_field' ++ field.label = descriptor.FieldDescriptor.LABEL_REQUIRED ++ field.type = descriptor.FieldDescriptor.TYPE_MESSAGE ++ field.type_name = 'Sub' ++ enum_field = nested_type.field.add() ++ enum_field.number = 2 ++ enum_field.name = 'bar_field' ++ enum_field.label = descriptor.FieldDescriptor.LABEL_REQUIRED ++ enum_field.type = descriptor.FieldDescriptor.TYPE_ENUM ++ enum_field.type_name = 'Foo2.Sub.FOO' ++ ++ result = descriptor.MakeDescriptor(message_type) ++ self.assertEqual(result.fields[0].cpp_type, ++ descriptor.FieldDescriptor.CPPTYPE_UINT64) ++ self.assertEqual(result.fields[1].cpp_type, ++ descriptor.FieldDescriptor.CPPTYPE_MESSAGE) ++ 
self.assertEqual(result.fields[1].message_type.containing_type, ++ result) ++ self.assertEqual(result.nested_types[0].fields[0].full_name, ++ 'Foo2.Sub.bar_field') ++ self.assertEqual(result.nested_types[0].fields[0].enum_type, ++ result.nested_types[0].enum_types[0]) ++ ++ def testMakeDescriptorWithUnsignedIntField(self): ++ file_descriptor_proto = descriptor_pb2.FileDescriptorProto() ++ file_descriptor_proto.name = 'Foo' ++ message_type = file_descriptor_proto.message_type.add() ++ message_type.name = file_descriptor_proto.name ++ enum_type = message_type.enum_type.add() ++ enum_type.name = 'FOO' ++ enum_type_val = enum_type.value.add() ++ enum_type_val.name = 'BAR' ++ enum_type_val.number = 3 ++ field = message_type.field.add() ++ field.number = 1 ++ field.name = 'uint64_field' ++ field.label = descriptor.FieldDescriptor.LABEL_REQUIRED ++ field.type = descriptor.FieldDescriptor.TYPE_UINT64 ++ enum_field = message_type.field.add() ++ enum_field.number = 2 ++ enum_field.name = 'bar_field' ++ enum_field.label = descriptor.FieldDescriptor.LABEL_REQUIRED ++ enum_field.type = descriptor.FieldDescriptor.TYPE_ENUM ++ enum_field.type_name = 'Foo.FOO' ++ ++ result = descriptor.MakeDescriptor(message_type) ++ self.assertEqual(result.fields[0].cpp_type, ++ descriptor.FieldDescriptor.CPPTYPE_UINT64) ++ ++ ++if __name__ == '__main__': ++ basetest.main() +--- /dev/null ++++ protobuf-2.6.1/python3/google/protobuf/internal/encoder.py +@@ -0,0 +1,772 @@ ++# Protocol Buffers - Google's data interchange format ++# Copyright 2008 Google Inc. All rights reserved. ++# https://developers.google.com/protocol-buffers/ ++# ++# Redistribution and use in source and binary forms, with or without ++# modification, are permitted provided that the following conditions are ++# met: ++# ++# * Redistributions of source code must retain the above copyright ++# notice, this list of conditions and the following disclaimer. 
++# * Redistributions in binary form must reproduce the above ++# copyright notice, this list of conditions and the following disclaimer ++# in the documentation and/or other materials provided with the ++# distribution. ++# * Neither the name of Google Inc. nor the names of its ++# contributors may be used to endorse or promote products derived from ++# this software without specific prior written permission. ++# ++# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS ++# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT ++# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR ++# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT ++# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, ++# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT ++# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, ++# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY ++# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT ++# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE ++# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. ++ ++# Copyright 2009 Google Inc. All Rights Reserved. ++ ++"""Code for encoding protocol message primitives. ++ ++Contains the logic for encoding every logical protocol field type ++into one of the 5 physical wire types. ++ ++This code is designed to push the Python interpreter's performance to the ++limits. ++ ++The basic idea is that at startup time, for every field (i.e. every ++FieldDescriptor) we construct two functions: a "sizer" and an "encoder". The ++sizer takes a value of this field's type and computes its byte size. The ++encoder takes a writer function and a value. It encodes the value into byte ++strings and invokes the writer function to write those strings. Typically the ++writer function is the write() method of a BytesIO. 
++ ++We try to do as much work as possible when constructing the writer and the ++sizer rather than when calling them. In particular: ++* We copy any needed global functions to local variables, so that we do not need ++ to do costly global table lookups at runtime. ++* Similarly, we try to do any attribute lookups at startup time if possible. ++* Every field's tag is encoded to bytes at startup, since it can't change at ++ runtime. ++* Whatever component of the field size we can compute at startup, we do. ++* We *avoid* sharing code if doing so would make the code slower and not sharing ++ does not burden us too much. For example, encoders for repeated fields do ++ not just call the encoders for singular fields in a loop because this would ++ add an extra function call overhead for every loop iteration; instead, we ++ manually inline the single-value encoder into the loop. ++* If a Python function lacks a return statement, Python actually generates ++ instructions to pop the result of the last statement off the stack, push ++ None onto the stack, and then return that. If we really don't care what ++ value is returned, then we can save two instructions by returning the ++ result of the last statement. It looks funny but it helps. ++* We assume that type and bounds checking has happened at a higher level. ++""" ++ ++__author__ = 'kenton@google.com (Kenton Varda)' ++ ++import struct ++ ++import six ++ ++from google.protobuf.internal import wire_format ++ ++ ++# This will overflow and thus become IEEE-754 "infinity". We would use ++# "float('inf')" but it doesn't work on Windows pre-Python-2.6. 
++_POS_INF = 1e10000 ++_NEG_INF = -_POS_INF ++ ++ ++def _VarintSize(value): ++ """Compute the size of a varint value.""" ++ if value <= 0x7f: return 1 ++ if value <= 0x3fff: return 2 ++ if value <= 0x1fffff: return 3 ++ if value <= 0xfffffff: return 4 ++ if value <= 0x7ffffffff: return 5 ++ if value <= 0x3ffffffffff: return 6 ++ if value <= 0x1ffffffffffff: return 7 ++ if value <= 0xffffffffffffff: return 8 ++ if value <= 0x7fffffffffffffff: return 9 ++ return 10 ++ ++ ++def _SignedVarintSize(value): ++ """Compute the size of a signed varint value.""" ++ if value < 0: return 10 ++ if value <= 0x7f: return 1 ++ if value <= 0x3fff: return 2 ++ if value <= 0x1fffff: return 3 ++ if value <= 0xfffffff: return 4 ++ if value <= 0x7ffffffff: return 5 ++ if value <= 0x3ffffffffff: return 6 ++ if value <= 0x1ffffffffffff: return 7 ++ if value <= 0xffffffffffffff: return 8 ++ if value <= 0x7fffffffffffffff: return 9 ++ return 10 ++ ++ ++def _TagSize(field_number): ++ """Returns the number of bytes required to serialize a tag with this field ++ number.""" ++ # Just pass in type 0, since the type won't affect the tag+type size. ++ return _VarintSize(wire_format.PackTag(field_number, 0)) ++ ++ ++# -------------------------------------------------------------------- ++# In this section we define some generic sizers. Each of these functions ++# takes parameters specific to a particular field type, e.g. int32 or fixed64. ++# It returns another function which in turn takes parameters specific to a ++# particular field, e.g. the field number and whether it is repeated or packed. ++# Look at the next section to see how these are used. ++ ++ ++def _SimpleSizer(compute_value_size): ++ """A sizer which uses the function compute_value_size to compute the size of ++ each value. 
Typically compute_value_size is _VarintSize.""" ++ ++ def SpecificSizer(field_number, is_repeated, is_packed): ++ tag_size = _TagSize(field_number) ++ if is_packed: ++ local_VarintSize = _VarintSize ++ def PackedFieldSize(value): ++ result = 0 ++ for element in value: ++ result += compute_value_size(element) ++ return result + local_VarintSize(result) + tag_size ++ return PackedFieldSize ++ elif is_repeated: ++ def RepeatedFieldSize(value): ++ result = tag_size * len(value) ++ for element in value: ++ result += compute_value_size(element) ++ return result ++ return RepeatedFieldSize ++ else: ++ def FieldSize(value): ++ return tag_size + compute_value_size(value) ++ return FieldSize ++ ++ return SpecificSizer ++ ++ ++def _ModifiedSizer(compute_value_size, modify_value): ++ """Like SimpleSizer, but modify_value is invoked on each value before it is ++ passed to compute_value_size. modify_value is typically ZigZagEncode.""" ++ ++ def SpecificSizer(field_number, is_repeated, is_packed): ++ tag_size = _TagSize(field_number) ++ if is_packed: ++ local_VarintSize = _VarintSize ++ def PackedFieldSize(value): ++ result = 0 ++ for element in value: ++ result += compute_value_size(modify_value(element)) ++ return result + local_VarintSize(result) + tag_size ++ return PackedFieldSize ++ elif is_repeated: ++ def RepeatedFieldSize(value): ++ result = tag_size * len(value) ++ for element in value: ++ result += compute_value_size(modify_value(element)) ++ return result ++ return RepeatedFieldSize ++ else: ++ def FieldSize(value): ++ return tag_size + compute_value_size(modify_value(value)) ++ return FieldSize ++ ++ return SpecificSizer ++ ++ ++def _FixedSizer(value_size): ++ """Like _SimpleSizer except for a fixed-size field. 
The input is the size ++ of one value.""" ++ ++ def SpecificSizer(field_number, is_repeated, is_packed): ++ tag_size = _TagSize(field_number) ++ if is_packed: ++ local_VarintSize = _VarintSize ++ def PackedFieldSize(value): ++ result = len(value) * value_size ++ return result + local_VarintSize(result) + tag_size ++ return PackedFieldSize ++ elif is_repeated: ++ element_size = value_size + tag_size ++ def RepeatedFieldSize(value): ++ return len(value) * element_size ++ return RepeatedFieldSize ++ else: ++ field_size = value_size + tag_size ++ def FieldSize(value): ++ return field_size ++ return FieldSize ++ ++ return SpecificSizer ++ ++ ++# ==================================================================== ++# Here we declare a sizer constructor for each field type. Each "sizer ++# constructor" is a function that takes (field_number, is_repeated, is_packed) ++# as parameters and returns a sizer, which in turn takes a field value as ++# a parameter and returns its encoded size. ++ ++ ++Int32Sizer = Int64Sizer = EnumSizer = _SimpleSizer(_SignedVarintSize) ++ ++UInt32Sizer = UInt64Sizer = _SimpleSizer(_VarintSize) ++ ++SInt32Sizer = SInt64Sizer = _ModifiedSizer( ++ _SignedVarintSize, wire_format.ZigZagEncode) ++ ++Fixed32Sizer = SFixed32Sizer = FloatSizer = _FixedSizer(4) ++Fixed64Sizer = SFixed64Sizer = DoubleSizer = _FixedSizer(8) ++ ++BoolSizer = _FixedSizer(1) ++ ++ ++def StringSizer(field_number, is_repeated, is_packed): ++ """Returns a sizer for a string field.""" ++ ++ tag_size = _TagSize(field_number) ++ local_VarintSize = _VarintSize ++ local_len = len ++ assert not is_packed ++ if is_repeated: ++ def RepeatedFieldSize(value): ++ result = tag_size * len(value) ++ for element in value: ++ l = local_len(element.encode('utf-8')) ++ result += local_VarintSize(l) + l ++ return result ++ return RepeatedFieldSize ++ else: ++ def FieldSize(value): ++ l = local_len(value.encode('utf-8')) ++ return tag_size + local_VarintSize(l) + l ++ return FieldSize ++ ++ ++def 
BytesSizer(field_number, is_repeated, is_packed): ++ """Returns a sizer for a bytes field.""" ++ ++ tag_size = _TagSize(field_number) ++ local_VarintSize = _VarintSize ++ local_len = len ++ assert not is_packed ++ if is_repeated: ++ def RepeatedFieldSize(value): ++ result = tag_size * len(value) ++ for element in value: ++ l = local_len(element) ++ result += local_VarintSize(l) + l ++ return result ++ return RepeatedFieldSize ++ else: ++ def FieldSize(value): ++ l = local_len(value) ++ return tag_size + local_VarintSize(l) + l ++ return FieldSize ++ ++ ++def GroupSizer(field_number, is_repeated, is_packed): ++ """Returns a sizer for a group field.""" ++ ++ tag_size = _TagSize(field_number) * 2 ++ assert not is_packed ++ if is_repeated: ++ def RepeatedFieldSize(value): ++ result = tag_size * len(value) ++ for element in value: ++ result += element.ByteSize() ++ return result ++ return RepeatedFieldSize ++ else: ++ def FieldSize(value): ++ return tag_size + value.ByteSize() ++ return FieldSize ++ ++ ++def MessageSizer(field_number, is_repeated, is_packed): ++ """Returns a sizer for a message field.""" ++ ++ tag_size = _TagSize(field_number) ++ local_VarintSize = _VarintSize ++ assert not is_packed ++ if is_repeated: ++ def RepeatedFieldSize(value): ++ result = tag_size * len(value) ++ for element in value: ++ l = element.ByteSize() ++ result += local_VarintSize(l) + l ++ return result ++ return RepeatedFieldSize ++ else: ++ def FieldSize(value): ++ l = value.ByteSize() ++ return tag_size + local_VarintSize(l) + l ++ return FieldSize ++ ++ ++# -------------------------------------------------------------------- ++# MessageSet is special. ++ ++ ++def MessageSetItemSizer(field_number): ++ """Returns a sizer for extensions of MessageSet. 
++ ++ The message set message looks like this: ++ message MessageSet { ++ repeated group Item = 1 { ++ required int32 type_id = 2; ++ required string message = 3; ++ } ++ } ++ """ ++ static_size = (_TagSize(1) * 2 + _TagSize(2) + _VarintSize(field_number) + ++ _TagSize(3)) ++ local_VarintSize = _VarintSize ++ ++ def FieldSize(value): ++ l = value.ByteSize() ++ return static_size + local_VarintSize(l) + l ++ ++ return FieldSize ++ ++ ++# ==================================================================== ++# Encoders! ++ ++ ++def _VarintEncoder(): ++ """Return an encoder for a basic varint value (does not include tag).""" ++ ++ def EncodeVarint(write, value): ++ bits = value & 0x7f ++ value >>= 7 ++ while value: ++ write(six.int2byte(0x80|bits)) ++ bits = value & 0x7f ++ value >>= 7 ++ return write(six.int2byte(bits)) ++ ++ return EncodeVarint ++ ++ ++def _SignedVarintEncoder(): ++ """Return an encoder for a basic signed varint value (does not include ++ tag).""" ++ ++ def EncodeSignedVarint(write, value): ++ if value < 0: ++ value += (1 << 64) ++ bits = value & 0x7f ++ value >>= 7 ++ while value: ++ write(six.int2byte(0x80|bits)) ++ bits = value & 0x7f ++ value >>= 7 ++ return write(six.int2byte(bits)) ++ ++ return EncodeSignedVarint ++ ++ ++_EncodeVarint = _VarintEncoder() ++_EncodeSignedVarint = _SignedVarintEncoder() ++ ++ ++def _VarintBytes(value): ++ """Encode the given integer as a varint and return the bytes. This is only ++ called at startup time so it doesn't need to be fast.""" ++ ++ pieces = [] ++ _EncodeVarint(pieces.append, value) ++ return b"".join(pieces) ++ ++ ++def TagBytes(field_number, wire_type): ++ """Encode the given tag and return the bytes. Only called at startup.""" ++ ++ return _VarintBytes(wire_format.PackTag(field_number, wire_type)) ++ ++# -------------------------------------------------------------------- ++# As with sizers (see above), we have a number of common encoder ++# implementations. 
++ ++ ++def _SimpleEncoder(wire_type, encode_value, compute_value_size): ++ """Return a constructor for an encoder for fields of a particular type. ++ ++ Args: ++ wire_type: The field's wire type, for encoding tags. ++ encode_value: A function which encodes an individual value, e.g. ++ _EncodeVarint(). ++ compute_value_size: A function which computes the size of an individual ++ value, e.g. _VarintSize(). ++ """ ++ ++ def SpecificEncoder(field_number, is_repeated, is_packed): ++ if is_packed: ++ tag_bytes = TagBytes(field_number, wire_format.WIRETYPE_LENGTH_DELIMITED) ++ local_EncodeVarint = _EncodeVarint ++ def EncodePackedField(write, value): ++ write(tag_bytes) ++ size = 0 ++ for element in value: ++ size += compute_value_size(element) ++ local_EncodeVarint(write, size) ++ for element in value: ++ encode_value(write, element) ++ return EncodePackedField ++ elif is_repeated: ++ tag_bytes = TagBytes(field_number, wire_type) ++ def EncodeRepeatedField(write, value): ++ for element in value: ++ write(tag_bytes) ++ encode_value(write, element) ++ return EncodeRepeatedField ++ else: ++ tag_bytes = TagBytes(field_number, wire_type) ++ def EncodeField(write, value): ++ write(tag_bytes) ++ return encode_value(write, value) ++ return EncodeField ++ ++ return SpecificEncoder ++ ++ ++def _ModifiedEncoder(wire_type, encode_value, compute_value_size, modify_value): ++ """Like SimpleEncoder but additionally invokes modify_value on every value ++ before passing it to encode_value. 
Usually modify_value is ZigZagEncode.""" ++ ++ def SpecificEncoder(field_number, is_repeated, is_packed): ++ if is_packed: ++ tag_bytes = TagBytes(field_number, wire_format.WIRETYPE_LENGTH_DELIMITED) ++ local_EncodeVarint = _EncodeVarint ++ def EncodePackedField(write, value): ++ write(tag_bytes) ++ size = 0 ++ for element in value: ++ size += compute_value_size(modify_value(element)) ++ local_EncodeVarint(write, size) ++ for element in value: ++ encode_value(write, modify_value(element)) ++ return EncodePackedField ++ elif is_repeated: ++ tag_bytes = TagBytes(field_number, wire_type) ++ def EncodeRepeatedField(write, value): ++ for element in value: ++ write(tag_bytes) ++ encode_value(write, modify_value(element)) ++ return EncodeRepeatedField ++ else: ++ tag_bytes = TagBytes(field_number, wire_type) ++ def EncodeField(write, value): ++ write(tag_bytes) ++ return encode_value(write, modify_value(value)) ++ return EncodeField ++ ++ return SpecificEncoder ++ ++ ++def _StructPackEncoder(wire_type, format): ++ """Return a constructor for an encoder for a fixed-width field. ++ ++ Args: ++ wire_type: The field's wire type, for encoding tags. ++ format: The format string to pass to struct.pack(). 
++ """ ++ ++ value_size = struct.calcsize(format) ++ ++ def SpecificEncoder(field_number, is_repeated, is_packed): ++ local_struct_pack = struct.pack ++ if is_packed: ++ tag_bytes = TagBytes(field_number, wire_format.WIRETYPE_LENGTH_DELIMITED) ++ local_EncodeVarint = _EncodeVarint ++ def EncodePackedField(write, value): ++ write(tag_bytes) ++ local_EncodeVarint(write, len(value) * value_size) ++ for element in value: ++ write(local_struct_pack(format, element)) ++ return EncodePackedField ++ elif is_repeated: ++ tag_bytes = TagBytes(field_number, wire_type) ++ def EncodeRepeatedField(write, value): ++ for element in value: ++ write(tag_bytes) ++ write(local_struct_pack(format, element)) ++ return EncodeRepeatedField ++ else: ++ tag_bytes = TagBytes(field_number, wire_type) ++ def EncodeField(write, value): ++ write(tag_bytes) ++ return write(local_struct_pack(format, value)) ++ return EncodeField ++ ++ return SpecificEncoder ++ ++ ++def _FloatingPointEncoder(wire_type, format): ++ """Return a constructor for an encoder for float fields. ++ ++ This is like StructPackEncoder, but catches errors that may be due to ++ passing non-finite floating-point values to struct.pack, and makes a ++ second attempt to encode those values. ++ ++ Args: ++ wire_type: The field's wire type, for encoding tags. ++ format: The format string to pass to struct.pack(). ++ """ ++ ++ value_size = struct.calcsize(format) ++ if value_size == 4: ++ def EncodeNonFiniteOrRaise(write, value): ++ # Remember that the serialized form uses little-endian byte order. 
++ if value == _POS_INF: ++ write(b'\x00\x00\x80\x7F') ++ elif value == _NEG_INF: ++ write(b'\x00\x00\x80\xFF') ++ elif value != value: # NaN ++ write(b'\x00\x00\xC0\x7F') ++ else: ++ raise ++ elif value_size == 8: ++ def EncodeNonFiniteOrRaise(write, value): ++ if value == _POS_INF: ++ write(b'\x00\x00\x00\x00\x00\x00\xF0\x7F') ++ elif value == _NEG_INF: ++ write(b'\x00\x00\x00\x00\x00\x00\xF0\xFF') ++ elif value != value: # NaN ++ write(b'\x00\x00\x00\x00\x00\x00\xF8\x7F') ++ else: ++ raise ++ else: ++ raise ValueError('Can\'t encode floating-point values that are ' ++ '%d bytes long (only 4 or 8)' % value_size) ++ ++ def SpecificEncoder(field_number, is_repeated, is_packed): ++ local_struct_pack = struct.pack ++ if is_packed: ++ tag_bytes = TagBytes(field_number, wire_format.WIRETYPE_LENGTH_DELIMITED) ++ local_EncodeVarint = _EncodeVarint ++ def EncodePackedField(write, value): ++ write(tag_bytes) ++ local_EncodeVarint(write, len(value) * value_size) ++ for element in value: ++ # This try/except block is going to be faster than any code that ++ # we could write to check whether element is finite. ++ try: ++ write(local_struct_pack(format, element)) ++ except SystemError: ++ EncodeNonFiniteOrRaise(write, element) ++ return EncodePackedField ++ elif is_repeated: ++ tag_bytes = TagBytes(field_number, wire_type) ++ def EncodeRepeatedField(write, value): ++ for element in value: ++ write(tag_bytes) ++ try: ++ write(local_struct_pack(format, element)) ++ except SystemError: ++ EncodeNonFiniteOrRaise(write, element) ++ return EncodeRepeatedField ++ else: ++ tag_bytes = TagBytes(field_number, wire_type) ++ def EncodeField(write, value): ++ write(tag_bytes) ++ try: ++ write(local_struct_pack(format, value)) ++ except SystemError: ++ EncodeNonFiniteOrRaise(write, value) ++ return EncodeField ++ ++ return SpecificEncoder ++ ++ ++# ==================================================================== ++# Here we declare an encoder constructor for each field type. 
These work ++# very similarly to sizer constructors, described earlier. ++ ++ ++Int32Encoder = Int64Encoder = EnumEncoder = _SimpleEncoder( ++ wire_format.WIRETYPE_VARINT, _EncodeSignedVarint, _SignedVarintSize) ++ ++UInt32Encoder = UInt64Encoder = _SimpleEncoder( ++ wire_format.WIRETYPE_VARINT, _EncodeVarint, _VarintSize) ++ ++SInt32Encoder = SInt64Encoder = _ModifiedEncoder( ++ wire_format.WIRETYPE_VARINT, _EncodeVarint, _VarintSize, ++ wire_format.ZigZagEncode) ++ ++# Note that Python conveniently guarantees that when using the '<' prefix on ++# formats, they will also have the same size across all platforms (as opposed ++# to without the prefix, where their sizes depend on the C compiler's basic ++# type sizes). ++Fixed32Encoder = _StructPackEncoder(wire_format.WIRETYPE_FIXED32, ' 0) ++ self.assertTrue(isinf(message.neg_inf_double)) ++ self.assertTrue(message.neg_inf_double < 0) ++ self.assertTrue(isnan(message.nan_double)) ++ ++ self.assertTrue(isinf(message.inf_float)) ++ self.assertTrue(message.inf_float > 0) ++ self.assertTrue(isinf(message.neg_inf_float)) ++ self.assertTrue(message.neg_inf_float < 0) ++ self.assertTrue(isnan(message.nan_float)) ++ self.assertEqual("? ? ?? ?? ??? 
??/ ??-", message.cpp_trigraph) ++ ++ def testHasDefaultValues(self): ++ desc = unittest_pb2.TestAllTypes.DESCRIPTOR ++ ++ expected_has_default_by_name = { ++ 'optional_int32': False, ++ 'repeated_int32': False, ++ 'optional_nested_message': False, ++ 'default_int32': True, ++ } ++ ++ has_default_by_name = dict( ++ [(f.name, f.has_default_value) ++ for f in desc.fields ++ if f.name in expected_has_default_by_name]) ++ self.assertEqual(expected_has_default_by_name, has_default_by_name) ++ ++ def testContainingTypeBehaviorForExtensions(self): ++ self.assertEqual(unittest_pb2.optional_int32_extension.containing_type, ++ unittest_pb2.TestAllExtensions.DESCRIPTOR) ++ self.assertEqual(unittest_pb2.TestRequired.single.containing_type, ++ unittest_pb2.TestAllExtensions.DESCRIPTOR) ++ ++ def testExtensionScope(self): ++ self.assertEqual(unittest_pb2.optional_int32_extension.extension_scope, ++ None) ++ self.assertEqual(unittest_pb2.TestRequired.single.extension_scope, ++ unittest_pb2.TestRequired.DESCRIPTOR) ++ ++ def testIsExtension(self): ++ self.assertTrue(unittest_pb2.optional_int32_extension.is_extension) ++ self.assertTrue(unittest_pb2.TestRequired.single.is_extension) ++ ++ message_descriptor = unittest_pb2.TestRequired.DESCRIPTOR ++ non_extension_descriptor = message_descriptor.fields_by_name['a'] ++ self.assertTrue(not non_extension_descriptor.is_extension) ++ ++ def testOptions(self): ++ proto = unittest_mset_pb2.TestMessageSet() ++ self.assertTrue(proto.DESCRIPTOR.GetOptions().message_set_wire_format) ++ ++ def testMessageWithCustomOptions(self): ++ proto = unittest_custom_options_pb2.TestMessageWithCustomOptions() ++ enum_options = proto.DESCRIPTOR.enum_types_by_name['AnEnum'].GetOptions() ++ self.assertTrue(enum_options is not None) ++ # TODO(gps): We really should test for the presense of the enum_opt1 ++ # extension and for its value to be set to -789. 
++ ++ def testNestedTypes(self): ++ self.assertEquals( ++ set(unittest_pb2.TestAllTypes.DESCRIPTOR.nested_types), ++ set([ ++ unittest_pb2.TestAllTypes.NestedMessage.DESCRIPTOR, ++ unittest_pb2.TestAllTypes.OptionalGroup.DESCRIPTOR, ++ unittest_pb2.TestAllTypes.RepeatedGroup.DESCRIPTOR, ++ ])) ++ self.assertEqual(unittest_pb2.TestEmptyMessage.DESCRIPTOR.nested_types, []) ++ self.assertEqual( ++ unittest_pb2.TestAllTypes.NestedMessage.DESCRIPTOR.nested_types, []) ++ ++ def testContainingType(self): ++ self.assertTrue( ++ unittest_pb2.TestEmptyMessage.DESCRIPTOR.containing_type is None) ++ self.assertTrue( ++ unittest_pb2.TestAllTypes.DESCRIPTOR.containing_type is None) ++ self.assertEqual( ++ unittest_pb2.TestAllTypes.NestedMessage.DESCRIPTOR.containing_type, ++ unittest_pb2.TestAllTypes.DESCRIPTOR) ++ self.assertEqual( ++ unittest_pb2.TestAllTypes.NestedMessage.DESCRIPTOR.containing_type, ++ unittest_pb2.TestAllTypes.DESCRIPTOR) ++ self.assertEqual( ++ unittest_pb2.TestAllTypes.RepeatedGroup.DESCRIPTOR.containing_type, ++ unittest_pb2.TestAllTypes.DESCRIPTOR) ++ ++ def testContainingTypeInEnumDescriptor(self): ++ self.assertTrue(unittest_pb2._FOREIGNENUM.containing_type is None) ++ self.assertEqual(unittest_pb2._TESTALLTYPES_NESTEDENUM.containing_type, ++ unittest_pb2.TestAllTypes.DESCRIPTOR) ++ ++ def testPackage(self): ++ self.assertEqual( ++ unittest_pb2.TestAllTypes.DESCRIPTOR.file.package, ++ 'protobuf_unittest') ++ desc = unittest_pb2.TestAllTypes.NestedMessage.DESCRIPTOR ++ self.assertEqual(desc.file.package, 'protobuf_unittest') ++ self.assertEqual( ++ unittest_import_pb2.ImportMessage.DESCRIPTOR.file.package, ++ 'protobuf_unittest_import') ++ ++ self.assertEqual( ++ unittest_pb2._FOREIGNENUM.file.package, 'protobuf_unittest') ++ self.assertEqual( ++ unittest_pb2._TESTALLTYPES_NESTEDENUM.file.package, ++ 'protobuf_unittest') ++ self.assertEqual( ++ unittest_import_pb2._IMPORTENUM.file.package, ++ 'protobuf_unittest_import') ++ ++ def 
testExtensionRange(self): ++ self.assertEqual( ++ unittest_pb2.TestAllTypes.DESCRIPTOR.extension_ranges, []) ++ self.assertEqual( ++ unittest_pb2.TestAllExtensions.DESCRIPTOR.extension_ranges, ++ [(1, MAX_EXTENSION)]) ++ self.assertEqual( ++ unittest_pb2.TestMultipleExtensionRanges.DESCRIPTOR.extension_ranges, ++ [(42, 43), (4143, 4244), (65536, MAX_EXTENSION)]) ++ ++ def testFileDescriptor(self): ++ self.assertEqual(unittest_pb2.DESCRIPTOR.name, ++ 'google/protobuf/unittest.proto') ++ self.assertEqual(unittest_pb2.DESCRIPTOR.package, 'protobuf_unittest') ++ self.assertFalse(unittest_pb2.DESCRIPTOR.serialized_pb is None) ++ self.assertEqual(unittest_pb2.DESCRIPTOR.dependencies, ++ [unittest_import_pb2.DESCRIPTOR]) ++ self.assertEqual(unittest_import_pb2.DESCRIPTOR.dependencies, ++ [unittest_import_public_pb2.DESCRIPTOR]) ++ ++ def testNoGenericServices(self): ++ self.assertTrue(hasattr(unittest_no_generic_services_pb2, "TestMessage")) ++ self.assertTrue(hasattr(unittest_no_generic_services_pb2, "FOO")) ++ self.assertTrue(hasattr(unittest_no_generic_services_pb2, "test_extension")) ++ ++ # Make sure unittest_no_generic_services_pb2 has no services subclassing ++ # Proto2 Service class. ++ if hasattr(unittest_no_generic_services_pb2, "TestService"): ++ self.assertFalse(issubclass(unittest_no_generic_services_pb2.TestService, ++ service.Service)) ++ ++ def testMessageTypesByName(self): ++ file_type = unittest_pb2.DESCRIPTOR ++ self.assertEqual( ++ unittest_pb2._TESTALLTYPES, ++ file_type.message_types_by_name[unittest_pb2._TESTALLTYPES.name]) ++ ++ # Nested messages shouldn't be included in the message_types_by_name ++ # dictionary (like in the C++ API). 
++ self.assertFalse( ++ unittest_pb2._TESTALLTYPES_NESTEDMESSAGE.name in ++ file_type.message_types_by_name) ++ ++ def testEnumTypesByName(self): ++ file_type = unittest_pb2.DESCRIPTOR ++ self.assertEqual( ++ unittest_pb2._FOREIGNENUM, ++ file_type.enum_types_by_name[unittest_pb2._FOREIGNENUM.name]) ++ ++ def testExtensionsByName(self): ++ file_type = unittest_pb2.DESCRIPTOR ++ self.assertEqual( ++ unittest_pb2.my_extension_string, ++ file_type.extensions_by_name[unittest_pb2.my_extension_string.name]) ++ ++ def testPublicImports(self): ++ # Test public imports as embedded message. ++ all_type_proto = unittest_pb2.TestAllTypes() ++ self.assertEqual(0, all_type_proto.optional_public_import_message.e) ++ ++ # PublicImportMessage is actually defined in unittest_import_public_pb2 ++ # module, and is public imported by unittest_import_pb2 module. ++ public_import_proto = unittest_import_pb2.PublicImportMessage() ++ self.assertEqual(0, public_import_proto.e) ++ self.assertTrue(unittest_import_public_pb2.PublicImportMessage is ++ unittest_import_pb2.PublicImportMessage) ++ ++ def testBadIdentifiers(self): ++ # We're just testing that the code was imported without problems. 
++ message = test_bad_identifiers_pb2.TestBadIdentifiers() ++ self.assertEqual(message.Extensions[test_bad_identifiers_pb2.message], ++ "foo") ++ self.assertEqual(message.Extensions[test_bad_identifiers_pb2.descriptor], ++ "bar") ++ self.assertEqual(message.Extensions[test_bad_identifiers_pb2.reflection], ++ "baz") ++ self.assertEqual(message.Extensions[test_bad_identifiers_pb2.service], ++ "qux") ++ ++ def testOneof(self): ++ desc = unittest_pb2.TestAllTypes.DESCRIPTOR ++ self.assertEqual(1, len(desc.oneofs)) ++ self.assertEqual('oneof_field', desc.oneofs[0].name) ++ self.assertEqual(0, desc.oneofs[0].index) ++ self.assertIs(desc, desc.oneofs[0].containing_type) ++ self.assertIs(desc.oneofs[0], desc.oneofs_by_name['oneof_field']) ++ nested_names = set(['oneof_uint32', 'oneof_nested_message', ++ 'oneof_string', 'oneof_bytes']) ++ self.assertSameElements( ++ nested_names, ++ [field.name for field in desc.oneofs[0].fields]) ++ for field_name, field_desc in desc.fields_by_name.items(): ++ if field_name in nested_names: ++ self.assertIs(desc.oneofs[0], field_desc.containing_oneof) ++ else: ++ self.assertIsNone(field_desc.containing_oneof) ++ ++ ++class SymbolDatabaseRegistrationTest(basetest.TestCase): ++ """Checks that messages, enums and files are correctly registered.""" ++ ++ def testGetSymbol(self): ++ self.assertEquals( ++ unittest_pb2.TestAllTypes, symbol_database.Default().GetSymbol( ++ 'protobuf_unittest.TestAllTypes')) ++ self.assertEquals( ++ unittest_pb2.TestAllTypes.NestedMessage, ++ symbol_database.Default().GetSymbol( ++ 'protobuf_unittest.TestAllTypes.NestedMessage')) ++ with self.assertRaises(KeyError): ++ symbol_database.Default().GetSymbol('protobuf_unittest.NestedMessage') ++ self.assertEquals( ++ unittest_pb2.TestAllTypes.OptionalGroup, ++ symbol_database.Default().GetSymbol( ++ 'protobuf_unittest.TestAllTypes.OptionalGroup')) ++ self.assertEquals( ++ unittest_pb2.TestAllTypes.RepeatedGroup, ++ symbol_database.Default().GetSymbol( ++ 
'protobuf_unittest.TestAllTypes.RepeatedGroup')) ++ ++ def testEnums(self): ++ self.assertEquals( ++ 'protobuf_unittest.ForeignEnum', ++ symbol_database.Default().pool.FindEnumTypeByName( ++ 'protobuf_unittest.ForeignEnum').full_name) ++ self.assertEquals( ++ 'protobuf_unittest.TestAllTypes.NestedEnum', ++ symbol_database.Default().pool.FindEnumTypeByName( ++ 'protobuf_unittest.TestAllTypes.NestedEnum').full_name) ++ ++ def testFindFileByName(self): ++ self.assertEquals( ++ 'google/protobuf/unittest.proto', ++ symbol_database.Default().pool.FindFileByName( ++ 'google/protobuf/unittest.proto').name) ++ ++if __name__ == '__main__': ++ basetest.main() +--- /dev/null ++++ protobuf-2.6.1/python3/google/protobuf/internal/message_factory_python_test.py +@@ -0,0 +1,54 @@ ++#! /usr/bin/python ++# ++# Protocol Buffers - Google's data interchange format ++# Copyright 2008 Google Inc. All rights reserved. ++# https://developers.google.com/protocol-buffers/ ++# ++# Redistribution and use in source and binary forms, with or without ++# modification, are permitted provided that the following conditions are ++# met: ++# ++# * Redistributions of source code must retain the above copyright ++# notice, this list of conditions and the following disclaimer. ++# * Redistributions in binary form must reproduce the above ++# copyright notice, this list of conditions and the following disclaimer ++# in the documentation and/or other materials provided with the ++# distribution. ++# * Neither the name of Google Inc. nor the names of its ++# contributors may be used to endorse or promote products derived from ++# this software without specific prior written permission. ++# ++# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS ++# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT ++# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR ++# A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT ++# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, ++# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT ++# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, ++# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY ++# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT ++# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE ++# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. ++ ++"""Tests for ..public.message_factory for the pure Python implementation.""" ++ ++import os ++os.environ['PROTOCOL_BUFFERS_PYTHON_IMPLEMENTATION'] = 'python' ++ ++# We must set the implementation version above before the google3 imports. ++# pylint: disable=g-import-not-at-top ++from google.apputils import basetest ++from google.protobuf.internal import api_implementation ++# Run all tests from the original module by putting them in our namespace. ++# pylint: disable=wildcard-import ++from google.protobuf.internal.message_factory_test import * ++ ++ ++class ConfirmPurePythonTest(basetest.TestCase): ++ ++ def testImplementationSetting(self): ++ self.assertEqual('python', api_implementation.Type()) ++ ++ ++if __name__ == '__main__': ++ basetest.main() +--- /dev/null ++++ protobuf-2.6.1/python3/google/protobuf/internal/message_factory_test.py +@@ -0,0 +1,131 @@ ++#! /usr/bin/python ++# ++# Protocol Buffers - Google's data interchange format ++# Copyright 2008 Google Inc. All rights reserved. ++# https://developers.google.com/protocol-buffers/ ++# ++# Redistribution and use in source and binary forms, with or without ++# modification, are permitted provided that the following conditions are ++# met: ++# ++# * Redistributions of source code must retain the above copyright ++# notice, this list of conditions and the following disclaimer. 
++# * Redistributions in binary form must reproduce the above ++# copyright notice, this list of conditions and the following disclaimer ++# in the documentation and/or other materials provided with the ++# distribution. ++# * Neither the name of Google Inc. nor the names of its ++# contributors may be used to endorse or promote products derived from ++# this software without specific prior written permission. ++# ++# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS ++# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT ++# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR ++# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT ++# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, ++# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT ++# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, ++# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY ++# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT ++# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE ++# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
++ ++"""Tests for google.protobuf.message_factory.""" ++ ++__author__ = 'matthewtoia@google.com (Matt Toia)' ++ ++from google.apputils import basetest ++from google.protobuf import descriptor_pb2 ++from google.protobuf.internal import factory_test1_pb2 ++from google.protobuf.internal import factory_test2_pb2 ++from google.protobuf import descriptor_database ++from google.protobuf import descriptor_pool ++from google.protobuf import message_factory ++ ++ ++class MessageFactoryTest(basetest.TestCase): ++ ++ def setUp(self): ++ self.factory_test1_fd = descriptor_pb2.FileDescriptorProto.FromString( ++ factory_test1_pb2.DESCRIPTOR.serialized_pb) ++ self.factory_test2_fd = descriptor_pb2.FileDescriptorProto.FromString( ++ factory_test2_pb2.DESCRIPTOR.serialized_pb) ++ ++ def _ExerciseDynamicClass(self, cls): ++ msg = cls() ++ msg.mandatory = 42 ++ msg.nested_factory_2_enum = 0 ++ msg.nested_factory_2_message.value = 'nested message value' ++ msg.factory_1_message.factory_1_enum = 1 ++ msg.factory_1_message.nested_factory_1_enum = 0 ++ msg.factory_1_message.nested_factory_1_message.value = ( ++ 'nested message value') ++ msg.factory_1_message.scalar_value = 22 ++ msg.factory_1_message.list_value.extend([u'one', u'two', u'three']) ++ msg.factory_1_message.list_value.append(u'four') ++ msg.factory_1_enum = 1 ++ msg.nested_factory_1_enum = 0 ++ msg.nested_factory_1_message.value = 'nested message value' ++ msg.circular_message.mandatory = 1 ++ msg.circular_message.circular_message.mandatory = 2 ++ msg.circular_message.scalar_value = 'one deep' ++ msg.scalar_value = 'zero deep' ++ msg.list_value.extend([u'four', u'three', u'two']) ++ msg.list_value.append(u'one') ++ msg.grouped.add() ++ msg.grouped[0].part_1 = 'hello' ++ msg.grouped[0].part_2 = 'world' ++ msg.grouped.add(part_1='testing', part_2='123') ++ msg.loop.loop.mandatory = 2 ++ msg.loop.loop.loop.loop.mandatory = 4 ++ serialized = msg.SerializeToString() ++ converted = 
factory_test2_pb2.Factory2Message.FromString(serialized) ++ reserialized = converted.SerializeToString() ++ self.assertEquals(serialized, reserialized) ++ result = cls.FromString(reserialized) ++ self.assertEquals(msg, result) ++ ++ def testGetPrototype(self): ++ db = descriptor_database.DescriptorDatabase() ++ pool = descriptor_pool.DescriptorPool(db) ++ db.Add(self.factory_test1_fd) ++ db.Add(self.factory_test2_fd) ++ factory = message_factory.MessageFactory() ++ cls = factory.GetPrototype(pool.FindMessageTypeByName( ++ 'google.protobuf.python.internal.Factory2Message')) ++ self.assertIsNot(cls, factory_test2_pb2.Factory2Message) ++ self._ExerciseDynamicClass(cls) ++ cls2 = factory.GetPrototype(pool.FindMessageTypeByName( ++ 'google.protobuf.python.internal.Factory2Message')) ++ self.assertIs(cls, cls2) ++ ++ def testGetMessages(self): ++ # performed twice because multiple calls with the same input must be allowed ++ for _ in range(2): ++ messages = message_factory.GetMessages([self.factory_test2_fd, ++ self.factory_test1_fd]) ++ self.assertContainsSubset( ++ ['google.protobuf.python.internal.Factory2Message', ++ 'google.protobuf.python.internal.Factory1Message'], ++ list(messages.keys())) ++ self._ExerciseDynamicClass( ++ messages['google.protobuf.python.internal.Factory2Message']) ++ self.assertContainsSubset( ++ ['google.protobuf.python.internal.Factory2Message.one_more_field', ++ 'google.protobuf.python.internal.another_field'], ++ (list(messages['google.protobuf.python.internal.Factory1Message'] ++ ._extensions_by_name.keys()))) ++ factory_msg1 = messages['google.protobuf.python.internal.Factory1Message'] ++ msg1 = messages['google.protobuf.python.internal.Factory1Message']() ++ ext1 = factory_msg1._extensions_by_name[ ++ 'google.protobuf.python.internal.Factory2Message.one_more_field'] ++ ext2 = factory_msg1._extensions_by_name[ ++ 'google.protobuf.python.internal.another_field'] ++ msg1.Extensions[ext1] = 'test1' ++ msg1.Extensions[ext2] = 'test2' ++ 
self.assertEquals('test1', msg1.Extensions[ext1]) ++ self.assertEquals('test2', msg1.Extensions[ext2]) ++ ++ ++if __name__ == '__main__': ++ basetest.main() +--- /dev/null ++++ protobuf-2.6.1/python3/google/protobuf/internal/message_listener.py +@@ -0,0 +1,78 @@ ++# Protocol Buffers - Google's data interchange format ++# Copyright 2008 Google Inc. All rights reserved. ++# https://developers.google.com/protocol-buffers/ ++# ++# Redistribution and use in source and binary forms, with or without ++# modification, are permitted provided that the following conditions are ++# met: ++# ++# * Redistributions of source code must retain the above copyright ++# notice, this list of conditions and the following disclaimer. ++# * Redistributions in binary form must reproduce the above ++# copyright notice, this list of conditions and the following disclaimer ++# in the documentation and/or other materials provided with the ++# distribution. ++# * Neither the name of Google Inc. nor the names of its ++# contributors may be used to endorse or promote products derived from ++# this software without specific prior written permission. ++# ++# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS ++# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT ++# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR ++# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT ++# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, ++# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT ++# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, ++# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY ++# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT ++# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE ++# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
++ ++"""Defines a listener interface for observing certain ++state transitions on Message objects. ++ ++Also defines a null implementation of this interface. ++""" ++ ++__author__ = 'robinson@google.com (Will Robinson)' ++ ++ ++class MessageListener(object): ++ ++ """Listens for modifications made to a message. Meant to be registered via ++ Message._SetListener(). ++ ++ Attributes: ++ dirty: If True, then calling Modified() would be a no-op. This can be ++ used to avoid these calls entirely in the common case. ++ """ ++ ++ def Modified(self): ++ """Called every time the message is modified in such a way that the parent ++ message may need to be updated. This currently means either: ++ (a) The message was modified for the first time, so the parent message ++ should henceforth mark the message as present. ++ (b) The message's cached byte size became dirty -- i.e. the message was ++ modified for the first time after a previous call to ByteSize(). ++ Therefore the parent should also mark its byte size as dirty. ++ Note that (a) implies (b), since new objects start out with a client cached ++ size (zero). However, we document (a) explicitly because it is important. ++ ++ Modified() will *only* be called in response to one of these two events -- ++ not every time the sub-message is modified. ++ ++ Note that if the listener's |dirty| attribute is true, then calling ++ Modified at the moment would be a no-op, so it can be skipped. Performance- ++ sensitive callers should check this attribute directly before calling since ++ it will be true most of the time. ++ """ ++ ++ raise NotImplementedError ++ ++ ++class NullMessageListener(object): ++ ++ """No-op MessageListener implementation.""" ++ ++ def Modified(self): ++ pass +--- /dev/null ++++ protobuf-2.6.1/python3/google/protobuf/internal/message_python_test.py +@@ -0,0 +1,54 @@ ++#! /usr/bin/python ++# ++# Protocol Buffers - Google's data interchange format ++# Copyright 2008 Google Inc. All rights reserved. 
++# https://developers.google.com/protocol-buffers/ ++# ++# Redistribution and use in source and binary forms, with or without ++# modification, are permitted provided that the following conditions are ++# met: ++# ++# * Redistributions of source code must retain the above copyright ++# notice, this list of conditions and the following disclaimer. ++# * Redistributions in binary form must reproduce the above ++# copyright notice, this list of conditions and the following disclaimer ++# in the documentation and/or other materials provided with the ++# distribution. ++# * Neither the name of Google Inc. nor the names of its ++# contributors may be used to endorse or promote products derived from ++# this software without specific prior written permission. ++# ++# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS ++# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT ++# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR ++# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT ++# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, ++# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT ++# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, ++# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY ++# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT ++# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE ++# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. ++ ++"""Tests for ..public.message for the pure Python implementation.""" ++ ++import os ++os.environ['PROTOCOL_BUFFERS_PYTHON_IMPLEMENTATION'] = 'python' ++ ++# We must set the implementation version above before the google3 imports. 
++# pylint: disable=g-import-not-at-top ++from google.apputils import basetest ++from google.protobuf.internal import api_implementation ++# Run all tests from the original module by putting them in our namespace. ++# pylint: disable=wildcard-import ++from google.protobuf.internal.message_test import * ++ ++ ++class ConfirmPurePythonTest(basetest.TestCase): ++ ++ def testImplementationSetting(self): ++ self.assertEqual('python', api_implementation.Type()) ++ ++ ++if __name__ == '__main__': ++ basetest.main() +--- /dev/null ++++ protobuf-2.6.1/python3/google/protobuf/internal/message_test.py +@@ -0,0 +1,681 @@ ++#! /usr/bin/python ++# ++# Protocol Buffers - Google's data interchange format ++# Copyright 2008 Google Inc. All rights reserved. ++# https://developers.google.com/protocol-buffers/ ++# ++# Redistribution and use in source and binary forms, with or without ++# modification, are permitted provided that the following conditions are ++# met: ++# ++# * Redistributions of source code must retain the above copyright ++# notice, this list of conditions and the following disclaimer. ++# * Redistributions in binary form must reproduce the above ++# copyright notice, this list of conditions and the following disclaimer ++# in the documentation and/or other materials provided with the ++# distribution. ++# * Neither the name of Google Inc. nor the names of its ++# contributors may be used to endorse or promote products derived from ++# this software without specific prior written permission. ++# ++# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS ++# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT ++# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR ++# A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT ++# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, ++# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT ++# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, ++# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY ++# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT ++# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE ++# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. ++ ++"""Tests python protocol buffers against the golden message. ++ ++Note that the golden messages exercise every known field type, thus this ++test ends up exercising and verifying nearly all of the parsing and ++serialization code in the whole library. ++ ++TODO(kenton): Merge with wire_format_test? It doesn't make a whole lot of ++sense to call this a test of the "message" module, which only declares an ++abstract interface. ++""" ++ ++__author__ = 'gps@google.com (Gregory P. Smith)' ++ ++import copy ++import math ++import operator ++import pickle ++import sys ++ ++from google.apputils import basetest ++from google.protobuf import unittest_pb2 ++from google.protobuf.internal import api_implementation ++from google.protobuf.internal import test_util ++from google.protobuf import message ++ ++# Python pre-2.6 does not have isinf() or isnan() functions, so we have ++# to provide our own. ++def isnan(val): ++ # NaN is never equal to itself. ++ return val != val ++def isinf(val): ++ # Infinity times zero equals NaN. 
++ return not isnan(val) and isnan(val * 0) ++def IsPosInf(val): ++ return isinf(val) and (val > 0) ++def IsNegInf(val): ++ return isinf(val) and (val < 0) ++ ++ ++class MessageTest(basetest.TestCase): ++ ++ def testBadUtf8String(self): ++ if api_implementation.Type() != 'python': ++ self.skipTest("Skipping testBadUtf8String, currently only the python " ++ "api implementation raises UnicodeDecodeError when a " ++ "string field contains bad utf-8.") ++ bad_utf8_data = test_util.GoldenFileData('bad_utf8_string') ++ with self.assertRaises(UnicodeDecodeError) as context: ++ unittest_pb2.TestAllTypes.FromString(bad_utf8_data) ++ self.assertIn('field: protobuf_unittest.TestAllTypes.optional_string', ++ str(context.exception)) ++ ++ def testGoldenMessage(self): ++ golden_data = test_util.GoldenFileData( ++ 'golden_message_oneof_implemented') ++ golden_message = unittest_pb2.TestAllTypes() ++ golden_message.ParseFromString(golden_data) ++ test_util.ExpectAllFieldsSet(self, golden_message) ++ self.assertEqual(golden_data, golden_message.SerializeToString()) ++ golden_copy = copy.deepcopy(golden_message) ++ self.assertEqual(golden_data, golden_copy.SerializeToString()) ++ ++ def testGoldenExtensions(self): ++ golden_data = test_util.GoldenFileData('golden_message') ++ golden_message = unittest_pb2.TestAllExtensions() ++ golden_message.ParseFromString(golden_data) ++ all_set = unittest_pb2.TestAllExtensions() ++ test_util.SetAllExtensions(all_set) ++ self.assertEquals(all_set, golden_message) ++ self.assertEqual(golden_data, golden_message.SerializeToString()) ++ golden_copy = copy.deepcopy(golden_message) ++ self.assertEqual(golden_data, golden_copy.SerializeToString()) ++ ++ def testGoldenPackedMessage(self): ++ golden_data = test_util.GoldenFileData('golden_packed_fields_message') ++ golden_message = unittest_pb2.TestPackedTypes() ++ golden_message.ParseFromString(golden_data) ++ all_set = unittest_pb2.TestPackedTypes() ++ test_util.SetAllPackedFields(all_set) ++ 
self.assertEquals(all_set, golden_message) ++ self.assertEqual(golden_data, all_set.SerializeToString()) ++ golden_copy = copy.deepcopy(golden_message) ++ self.assertEqual(golden_data, golden_copy.SerializeToString()) ++ ++ def testGoldenPackedExtensions(self): ++ golden_data = test_util.GoldenFileData('golden_packed_fields_message') ++ golden_message = unittest_pb2.TestPackedExtensions() ++ golden_message.ParseFromString(golden_data) ++ all_set = unittest_pb2.TestPackedExtensions() ++ test_util.SetAllPackedExtensions(all_set) ++ self.assertEquals(all_set, golden_message) ++ self.assertEqual(golden_data, all_set.SerializeToString()) ++ golden_copy = copy.deepcopy(golden_message) ++ self.assertEqual(golden_data, golden_copy.SerializeToString()) ++ ++ def testPickleSupport(self): ++ golden_data = test_util.GoldenFileData('golden_message') ++ golden_message = unittest_pb2.TestAllTypes() ++ golden_message.ParseFromString(golden_data) ++ pickled_message = pickle.dumps(golden_message) ++ ++ unpickled_message = pickle.loads(pickled_message) ++ self.assertEquals(unpickled_message, golden_message) ++ ++ ++ def testPickleIncompleteProto(self): ++ golden_message = unittest_pb2.TestRequired(a=1) ++ pickled_message = pickle.dumps(golden_message) ++ ++ unpickled_message = pickle.loads(pickled_message) ++ self.assertEquals(unpickled_message, golden_message) ++ self.assertEquals(unpickled_message.a, 1) ++ # This is still an incomplete proto - so serializing should fail ++ self.assertRaises(message.EncodeError, unpickled_message.SerializeToString) ++ ++ def testPositiveInfinity(self): ++ golden_data = (b'\x5D\x00\x00\x80\x7F' ++ b'\x61\x00\x00\x00\x00\x00\x00\xF0\x7F' ++ b'\xCD\x02\x00\x00\x80\x7F' ++ b'\xD1\x02\x00\x00\x00\x00\x00\x00\xF0\x7F') ++ golden_message = unittest_pb2.TestAllTypes() ++ golden_message.ParseFromString(golden_data) ++ self.assertTrue(IsPosInf(golden_message.optional_float)) ++ self.assertTrue(IsPosInf(golden_message.optional_double)) ++ 
self.assertTrue(IsPosInf(golden_message.repeated_float[0])) ++ self.assertTrue(IsPosInf(golden_message.repeated_double[0])) ++ self.assertEqual(golden_data, golden_message.SerializeToString()) ++ ++ def testNegativeInfinity(self): ++ golden_data = (b'\x5D\x00\x00\x80\xFF' ++ b'\x61\x00\x00\x00\x00\x00\x00\xF0\xFF' ++ b'\xCD\x02\x00\x00\x80\xFF' ++ b'\xD1\x02\x00\x00\x00\x00\x00\x00\xF0\xFF') ++ golden_message = unittest_pb2.TestAllTypes() ++ golden_message.ParseFromString(golden_data) ++ self.assertTrue(IsNegInf(golden_message.optional_float)) ++ self.assertTrue(IsNegInf(golden_message.optional_double)) ++ self.assertTrue(IsNegInf(golden_message.repeated_float[0])) ++ self.assertTrue(IsNegInf(golden_message.repeated_double[0])) ++ self.assertEqual(golden_data, golden_message.SerializeToString()) ++ ++ def testNotANumber(self): ++ golden_data = (b'\x5D\x00\x00\xC0\x7F' ++ b'\x61\x00\x00\x00\x00\x00\x00\xF8\x7F' ++ b'\xCD\x02\x00\x00\xC0\x7F' ++ b'\xD1\x02\x00\x00\x00\x00\x00\x00\xF8\x7F') ++ golden_message = unittest_pb2.TestAllTypes() ++ golden_message.ParseFromString(golden_data) ++ self.assertTrue(isnan(golden_message.optional_float)) ++ self.assertTrue(isnan(golden_message.optional_double)) ++ self.assertTrue(isnan(golden_message.repeated_float[0])) ++ self.assertTrue(isnan(golden_message.repeated_double[0])) ++ ++ # The protocol buffer may serialize to any one of multiple different ++ # representations of a NaN. Rather than verify a specific representation, ++ # verify the serialized string can be converted into a correctly ++ # behaving protocol buffer. 
++ serialized = golden_message.SerializeToString() ++ message = unittest_pb2.TestAllTypes() ++ message.ParseFromString(serialized) ++ self.assertTrue(isnan(message.optional_float)) ++ self.assertTrue(isnan(message.optional_double)) ++ self.assertTrue(isnan(message.repeated_float[0])) ++ self.assertTrue(isnan(message.repeated_double[0])) ++ ++ def testPositiveInfinityPacked(self): ++ golden_data = (b'\xA2\x06\x04\x00\x00\x80\x7F' ++ b'\xAA\x06\x08\x00\x00\x00\x00\x00\x00\xF0\x7F') ++ golden_message = unittest_pb2.TestPackedTypes() ++ golden_message.ParseFromString(golden_data) ++ self.assertTrue(IsPosInf(golden_message.packed_float[0])) ++ self.assertTrue(IsPosInf(golden_message.packed_double[0])) ++ self.assertEqual(golden_data, golden_message.SerializeToString()) ++ ++ def testNegativeInfinityPacked(self): ++ golden_data = (b'\xA2\x06\x04\x00\x00\x80\xFF' ++ b'\xAA\x06\x08\x00\x00\x00\x00\x00\x00\xF0\xFF') ++ golden_message = unittest_pb2.TestPackedTypes() ++ golden_message.ParseFromString(golden_data) ++ self.assertTrue(IsNegInf(golden_message.packed_float[0])) ++ self.assertTrue(IsNegInf(golden_message.packed_double[0])) ++ self.assertEqual(golden_data, golden_message.SerializeToString()) ++ ++ def testNotANumberPacked(self): ++ golden_data = (b'\xA2\x06\x04\x00\x00\xC0\x7F' ++ b'\xAA\x06\x08\x00\x00\x00\x00\x00\x00\xF8\x7F') ++ golden_message = unittest_pb2.TestPackedTypes() ++ golden_message.ParseFromString(golden_data) ++ self.assertTrue(isnan(golden_message.packed_float[0])) ++ self.assertTrue(isnan(golden_message.packed_double[0])) ++ ++ serialized = golden_message.SerializeToString() ++ message = unittest_pb2.TestPackedTypes() ++ message.ParseFromString(serialized) ++ self.assertTrue(isnan(message.packed_float[0])) ++ self.assertTrue(isnan(message.packed_double[0])) ++ ++ def testExtremeFloatValues(self): ++ message = unittest_pb2.TestAllTypes() ++ ++ # Most positive exponent, no significand bits set. 
++ kMostPosExponentNoSigBits = math.pow(2, 127) ++ message.optional_float = kMostPosExponentNoSigBits ++ message.ParseFromString(message.SerializeToString()) ++ self.assertTrue(message.optional_float == kMostPosExponentNoSigBits) ++ ++ # Most positive exponent, one significand bit set. ++ kMostPosExponentOneSigBit = 1.5 * math.pow(2, 127) ++ message.optional_float = kMostPosExponentOneSigBit ++ message.ParseFromString(message.SerializeToString()) ++ self.assertTrue(message.optional_float == kMostPosExponentOneSigBit) ++ ++ # Repeat last two cases with values of same magnitude, but negative. ++ message.optional_float = -kMostPosExponentNoSigBits ++ message.ParseFromString(message.SerializeToString()) ++ self.assertTrue(message.optional_float == -kMostPosExponentNoSigBits) ++ ++ message.optional_float = -kMostPosExponentOneSigBit ++ message.ParseFromString(message.SerializeToString()) ++ self.assertTrue(message.optional_float == -kMostPosExponentOneSigBit) ++ ++ # Most negative exponent, no significand bits set. ++ kMostNegExponentNoSigBits = math.pow(2, -127) ++ message.optional_float = kMostNegExponentNoSigBits ++ message.ParseFromString(message.SerializeToString()) ++ self.assertTrue(message.optional_float == kMostNegExponentNoSigBits) ++ ++ # Most negative exponent, one significand bit set. ++ kMostNegExponentOneSigBit = 1.5 * math.pow(2, -127) ++ message.optional_float = kMostNegExponentOneSigBit ++ message.ParseFromString(message.SerializeToString()) ++ self.assertTrue(message.optional_float == kMostNegExponentOneSigBit) ++ ++ # Repeat last two cases with values of the same magnitude, but negative. 
++ message.optional_float = -kMostNegExponentNoSigBits ++ message.ParseFromString(message.SerializeToString()) ++ self.assertTrue(message.optional_float == -kMostNegExponentNoSigBits) ++ ++ message.optional_float = -kMostNegExponentOneSigBit ++ message.ParseFromString(message.SerializeToString()) ++ self.assertTrue(message.optional_float == -kMostNegExponentOneSigBit) ++ ++ def testExtremeDoubleValues(self): ++ message = unittest_pb2.TestAllTypes() ++ ++ # Most positive exponent, no significand bits set. ++ kMostPosExponentNoSigBits = math.pow(2, 1023) ++ message.optional_double = kMostPosExponentNoSigBits ++ message.ParseFromString(message.SerializeToString()) ++ self.assertTrue(message.optional_double == kMostPosExponentNoSigBits) ++ ++ # Most positive exponent, one significand bit set. ++ kMostPosExponentOneSigBit = 1.5 * math.pow(2, 1023) ++ message.optional_double = kMostPosExponentOneSigBit ++ message.ParseFromString(message.SerializeToString()) ++ self.assertTrue(message.optional_double == kMostPosExponentOneSigBit) ++ ++ # Repeat last two cases with values of same magnitude, but negative. ++ message.optional_double = -kMostPosExponentNoSigBits ++ message.ParseFromString(message.SerializeToString()) ++ self.assertTrue(message.optional_double == -kMostPosExponentNoSigBits) ++ ++ message.optional_double = -kMostPosExponentOneSigBit ++ message.ParseFromString(message.SerializeToString()) ++ self.assertTrue(message.optional_double == -kMostPosExponentOneSigBit) ++ ++ # Most negative exponent, no significand bits set. ++ kMostNegExponentNoSigBits = math.pow(2, -1023) ++ message.optional_double = kMostNegExponentNoSigBits ++ message.ParseFromString(message.SerializeToString()) ++ self.assertTrue(message.optional_double == kMostNegExponentNoSigBits) ++ ++ # Most negative exponent, one significand bit set. 
++ kMostNegExponentOneSigBit = 1.5 * math.pow(2, -1023) ++ message.optional_double = kMostNegExponentOneSigBit ++ message.ParseFromString(message.SerializeToString()) ++ self.assertTrue(message.optional_double == kMostNegExponentOneSigBit) ++ ++ # Repeat last two cases with values of the same magnitude, but negative. ++ message.optional_double = -kMostNegExponentNoSigBits ++ message.ParseFromString(message.SerializeToString()) ++ self.assertTrue(message.optional_double == -kMostNegExponentNoSigBits) ++ ++ message.optional_double = -kMostNegExponentOneSigBit ++ message.ParseFromString(message.SerializeToString()) ++ self.assertTrue(message.optional_double == -kMostNegExponentOneSigBit) ++ ++ def testFloatPrinting(self): ++ message = unittest_pb2.TestAllTypes() ++ message.optional_float = 2.0 ++ self.assertEqual(str(message), 'optional_float: 2.0\n') ++ ++ def testHighPrecisionFloatPrinting(self): ++ message = unittest_pb2.TestAllTypes() ++ message.optional_double = 0.12345678912345678 ++ if sys.version_info.major >= 3: ++ self.assertEqual(str(message), 'optional_double: 0.12345678912345678\n') ++ else: ++ self.assertEqual(str(message), 'optional_double: 0.123456789123\n') ++ ++ def testUnknownFieldPrinting(self): ++ populated = unittest_pb2.TestAllTypes() ++ test_util.SetAllNonLazyFields(populated) ++ empty = unittest_pb2.TestEmptyMessage() ++ empty.ParseFromString(populated.SerializeToString()) ++ self.assertEqual(str(empty), '') ++ ++ def testSortingRepeatedScalarFieldsDefaultComparator(self): ++ """Check some different types with the default comparator.""" ++ message = unittest_pb2.TestAllTypes() ++ ++ # TODO(mattp): would testing more scalar types strengthen test? 
++ message.repeated_int32.append(1) ++ message.repeated_int32.append(3) ++ message.repeated_int32.append(2) ++ message.repeated_int32.sort() ++ self.assertEqual(message.repeated_int32[0], 1) ++ self.assertEqual(message.repeated_int32[1], 2) ++ self.assertEqual(message.repeated_int32[2], 3) ++ ++ message.repeated_float.append(1.1) ++ message.repeated_float.append(1.3) ++ message.repeated_float.append(1.2) ++ message.repeated_float.sort() ++ self.assertAlmostEqual(message.repeated_float[0], 1.1) ++ self.assertAlmostEqual(message.repeated_float[1], 1.2) ++ self.assertAlmostEqual(message.repeated_float[2], 1.3) ++ ++ message.repeated_string.append('a') ++ message.repeated_string.append('c') ++ message.repeated_string.append('b') ++ message.repeated_string.sort() ++ self.assertEqual(message.repeated_string[0], 'a') ++ self.assertEqual(message.repeated_string[1], 'b') ++ self.assertEqual(message.repeated_string[2], 'c') ++ ++ message.repeated_bytes.append(b'a') ++ message.repeated_bytes.append(b'c') ++ message.repeated_bytes.append(b'b') ++ message.repeated_bytes.sort() ++ self.assertEqual(message.repeated_bytes[0], b'a') ++ self.assertEqual(message.repeated_bytes[1], b'b') ++ self.assertEqual(message.repeated_bytes[2], b'c') ++ ++ def testSortingRepeatedScalarFieldsCustomComparator(self): ++ """Check some different types with custom comparator.""" ++ message = unittest_pb2.TestAllTypes() ++ ++ message.repeated_int32.append(-3) ++ message.repeated_int32.append(-2) ++ message.repeated_int32.append(-1) ++ message.repeated_int32.sort(key=abs) ++ self.assertEqual(message.repeated_int32[0], -1) ++ self.assertEqual(message.repeated_int32[1], -2) ++ self.assertEqual(message.repeated_int32[2], -3) ++ ++ message.repeated_string.append('aaa') ++ message.repeated_string.append('bb') ++ message.repeated_string.append('c') ++ message.repeated_string.sort(key=len) ++ self.assertEqual(message.repeated_string[0], 'c') ++ self.assertEqual(message.repeated_string[1], 'bb') ++ 
self.assertEqual(message.repeated_string[2], 'aaa') ++ ++ def testSortingRepeatedCompositeFieldsCustomComparator(self): ++ """Check passing a custom comparator to sort a repeated composite field.""" ++ message = unittest_pb2.TestAllTypes() ++ ++ message.repeated_nested_message.add().bb = 1 ++ message.repeated_nested_message.add().bb = 3 ++ message.repeated_nested_message.add().bb = 2 ++ message.repeated_nested_message.add().bb = 6 ++ message.repeated_nested_message.add().bb = 5 ++ message.repeated_nested_message.add().bb = 4 ++ message.repeated_nested_message.sort(key=operator.attrgetter('bb')) ++ self.assertEqual(message.repeated_nested_message[0].bb, 1) ++ self.assertEqual(message.repeated_nested_message[1].bb, 2) ++ self.assertEqual(message.repeated_nested_message[2].bb, 3) ++ self.assertEqual(message.repeated_nested_message[3].bb, 4) ++ self.assertEqual(message.repeated_nested_message[4].bb, 5) ++ self.assertEqual(message.repeated_nested_message[5].bb, 6) ++ ++ def testRepeatedCompositeFieldSortArguments(self): ++ """Check sorting a repeated composite field using list.sort() arguments.""" ++ message = unittest_pb2.TestAllTypes() ++ ++ get_bb = operator.attrgetter('bb') ++ cmp_bb = lambda a, b: cmp(a.bb, b.bb) ++ message.repeated_nested_message.add().bb = 1 ++ message.repeated_nested_message.add().bb = 3 ++ message.repeated_nested_message.add().bb = 2 ++ message.repeated_nested_message.add().bb = 6 ++ message.repeated_nested_message.add().bb = 5 ++ message.repeated_nested_message.add().bb = 4 ++ message.repeated_nested_message.sort(key=get_bb) ++ self.assertEqual([k.bb for k in message.repeated_nested_message], ++ [1, 2, 3, 4, 5, 6]) ++ message.repeated_nested_message.sort(key=get_bb, reverse=True) ++ self.assertEqual([k.bb for k in message.repeated_nested_message], ++ [6, 5, 4, 3, 2, 1]) ++ if sys.version_info.major >= 3: return # No cmp sorting in PY3. 
++ message.repeated_nested_message.sort(sort_function=cmp_bb) ++ self.assertEqual([k.bb for k in message.repeated_nested_message], ++ [1, 2, 3, 4, 5, 6]) ++ message.repeated_nested_message.sort(cmp=cmp_bb, reverse=True) ++ self.assertEqual([k.bb for k in message.repeated_nested_message], ++ [6, 5, 4, 3, 2, 1]) ++ ++ def testRepeatedScalarFieldSortArguments(self): ++ """Check sorting a scalar field using list.sort() arguments.""" ++ message = unittest_pb2.TestAllTypes() ++ ++ message.repeated_int32.append(-3) ++ message.repeated_int32.append(-2) ++ message.repeated_int32.append(-1) ++ message.repeated_int32.sort(key=abs) ++ self.assertEqual(list(message.repeated_int32), [-1, -2, -3]) ++ message.repeated_int32.sort(key=abs, reverse=True) ++ self.assertEqual(list(message.repeated_int32), [-3, -2, -1]) ++ if sys.version_info.major < 3: # No cmp sorting in PY3. ++ abs_cmp = lambda a, b: cmp(abs(a), abs(b)) ++ message.repeated_int32.sort(sort_function=abs_cmp) ++ self.assertEqual(list(message.repeated_int32), [-1, -2, -3]) ++ message.repeated_int32.sort(cmp=abs_cmp, reverse=True) ++ self.assertEqual(list(message.repeated_int32), [-3, -2, -1]) ++ ++ message.repeated_string.append('aaa') ++ message.repeated_string.append('bb') ++ message.repeated_string.append('c') ++ message.repeated_string.sort(key=len) ++ self.assertEqual(list(message.repeated_string), ['c', 'bb', 'aaa']) ++ message.repeated_string.sort(key=len, reverse=True) ++ self.assertEqual(list(message.repeated_string), ['aaa', 'bb', 'c']) ++ if sys.version_info.major < 3: # No cmp sorting in PY3. 
++ len_cmp = lambda a, b: cmp(len(a), len(b)) ++ message.repeated_string.sort(sort_function=len_cmp) ++ self.assertEqual(list(message.repeated_string), ['c', 'bb', 'aaa']) ++ message.repeated_string.sort(cmp=len_cmp, reverse=True) ++ self.assertEqual(list(message.repeated_string), ['aaa', 'bb', 'c']) ++ ++ def testRepeatedFieldsComparable(self): ++ m1 = unittest_pb2.TestAllTypes() ++ m2 = unittest_pb2.TestAllTypes() ++ m1.repeated_int32.append(0) ++ m1.repeated_int32.append(1) ++ m1.repeated_int32.append(2) ++ m2.repeated_int32.append(0) ++ m2.repeated_int32.append(1) ++ m2.repeated_int32.append(2) ++ m1.repeated_nested_message.add().bb = 1 ++ m1.repeated_nested_message.add().bb = 2 ++ m1.repeated_nested_message.add().bb = 3 ++ m2.repeated_nested_message.add().bb = 1 ++ m2.repeated_nested_message.add().bb = 2 ++ m2.repeated_nested_message.add().bb = 3 ++ ++ if sys.version_info.major >= 3: return # No cmp() in PY3. ++ ++ # These comparisons should not raise errors. ++ _ = m1 < m2 ++ _ = m1.repeated_nested_message < m2.repeated_nested_message ++ ++ # Make sure cmp always works. If it wasn't defined, these would be ++ # id() comparisons and would all fail. ++ self.assertEqual(cmp(m1, m2), 0) ++ self.assertEqual(cmp(m1.repeated_int32, m2.repeated_int32), 0) ++ self.assertEqual(cmp(m1.repeated_int32, [0, 1, 2]), 0) ++ self.assertEqual(cmp(m1.repeated_nested_message, ++ m2.repeated_nested_message), 0) ++ with self.assertRaises(TypeError): ++ # Can't compare repeated composite containers to lists. 
++ cmp(m1.repeated_nested_message, m2.repeated_nested_message[:]) ++ ++ # TODO(anuraag): Implement extensiondict comparison in C++ and then add test ++ ++ def testParsingMerge(self): ++ """Check the merge behavior when a required or optional field appears ++ multiple times in the input.""" ++ messages = [ ++ unittest_pb2.TestAllTypes(), ++ unittest_pb2.TestAllTypes(), ++ unittest_pb2.TestAllTypes() ] ++ messages[0].optional_int32 = 1 ++ messages[1].optional_int64 = 2 ++ messages[2].optional_int32 = 3 ++ messages[2].optional_string = 'hello' ++ ++ merged_message = unittest_pb2.TestAllTypes() ++ merged_message.optional_int32 = 3 ++ merged_message.optional_int64 = 2 ++ merged_message.optional_string = 'hello' ++ ++ generator = unittest_pb2.TestParsingMerge.RepeatedFieldsGenerator() ++ generator.field1.extend(messages) ++ generator.field2.extend(messages) ++ generator.field3.extend(messages) ++ generator.ext1.extend(messages) ++ generator.ext2.extend(messages) ++ generator.group1.add().field1.MergeFrom(messages[0]) ++ generator.group1.add().field1.MergeFrom(messages[1]) ++ generator.group1.add().field1.MergeFrom(messages[2]) ++ generator.group2.add().field1.MergeFrom(messages[0]) ++ generator.group2.add().field1.MergeFrom(messages[1]) ++ generator.group2.add().field1.MergeFrom(messages[2]) ++ ++ data = generator.SerializeToString() ++ parsing_merge = unittest_pb2.TestParsingMerge() ++ parsing_merge.ParseFromString(data) ++ ++ # Required and optional fields should be merged. ++ self.assertEqual(parsing_merge.required_all_types, merged_message) ++ self.assertEqual(parsing_merge.optional_all_types, merged_message) ++ self.assertEqual(parsing_merge.optionalgroup.optional_group_all_types, ++ merged_message) ++ self.assertEqual(parsing_merge.Extensions[ ++ unittest_pb2.TestParsingMerge.optional_ext], ++ merged_message) ++ ++ # Repeated fields should not be merged. 
++ self.assertEqual(len(parsing_merge.repeated_all_types), 3) ++ self.assertEqual(len(parsing_merge.repeatedgroup), 3) ++ self.assertEqual(len(parsing_merge.Extensions[ ++ unittest_pb2.TestParsingMerge.repeated_ext]), 3) ++ ++ def ensureNestedMessageExists(self, msg, attribute): ++ """Make sure that a nested message object exists. ++ ++ As soon as a nested message attribute is accessed, it will be present in the ++ _fields dict, without being marked as actually being set. ++ """ ++ getattr(msg, attribute) ++ self.assertFalse(msg.HasField(attribute)) ++ ++ def testOneofGetCaseNonexistingField(self): ++ m = unittest_pb2.TestAllTypes() ++ self.assertRaises(ValueError, m.WhichOneof, 'no_such_oneof_field') ++ ++ def testOneofSemantics(self): ++ m = unittest_pb2.TestAllTypes() ++ self.assertIs(None, m.WhichOneof('oneof_field')) ++ ++ m.oneof_uint32 = 11 ++ self.assertEqual('oneof_uint32', m.WhichOneof('oneof_field')) ++ self.assertTrue(m.HasField('oneof_uint32')) ++ ++ m.oneof_string = u'foo' ++ self.assertEqual('oneof_string', m.WhichOneof('oneof_field')) ++ self.assertFalse(m.HasField('oneof_uint32')) ++ self.assertTrue(m.HasField('oneof_string')) ++ ++ m.oneof_nested_message.bb = 11 ++ self.assertEqual('oneof_nested_message', m.WhichOneof('oneof_field')) ++ self.assertFalse(m.HasField('oneof_string')) ++ self.assertTrue(m.HasField('oneof_nested_message')) ++ ++ m.oneof_bytes = b'bb' ++ self.assertEqual('oneof_bytes', m.WhichOneof('oneof_field')) ++ self.assertFalse(m.HasField('oneof_nested_message')) ++ self.assertTrue(m.HasField('oneof_bytes')) ++ ++ def testOneofCompositeFieldReadAccess(self): ++ m = unittest_pb2.TestAllTypes() ++ m.oneof_uint32 = 11 ++ ++ self.ensureNestedMessageExists(m, 'oneof_nested_message') ++ self.assertEqual('oneof_uint32', m.WhichOneof('oneof_field')) ++ self.assertEqual(11, m.oneof_uint32) ++ ++ def testOneofHasField(self): ++ m = unittest_pb2.TestAllTypes() ++ self.assertFalse(m.HasField('oneof_field')) ++ m.oneof_uint32 = 11 ++ 
self.assertTrue(m.HasField('oneof_field')) ++ m.oneof_bytes = b'bb' ++ self.assertTrue(m.HasField('oneof_field')) ++ m.ClearField('oneof_bytes') ++ self.assertFalse(m.HasField('oneof_field')) ++ ++ def testOneofClearField(self): ++ m = unittest_pb2.TestAllTypes() ++ m.oneof_uint32 = 11 ++ m.ClearField('oneof_field') ++ self.assertFalse(m.HasField('oneof_field')) ++ self.assertFalse(m.HasField('oneof_uint32')) ++ self.assertIs(None, m.WhichOneof('oneof_field')) ++ ++ def testOneofClearSetField(self): ++ m = unittest_pb2.TestAllTypes() ++ m.oneof_uint32 = 11 ++ m.ClearField('oneof_uint32') ++ self.assertFalse(m.HasField('oneof_field')) ++ self.assertFalse(m.HasField('oneof_uint32')) ++ self.assertIs(None, m.WhichOneof('oneof_field')) ++ ++ def testOneofClearUnsetField(self): ++ m = unittest_pb2.TestAllTypes() ++ m.oneof_uint32 = 11 ++ self.ensureNestedMessageExists(m, 'oneof_nested_message') ++ m.ClearField('oneof_nested_message') ++ self.assertEqual(11, m.oneof_uint32) ++ self.assertTrue(m.HasField('oneof_field')) ++ self.assertTrue(m.HasField('oneof_uint32')) ++ self.assertEqual('oneof_uint32', m.WhichOneof('oneof_field')) ++ ++ def testOneofDeserialize(self): ++ m = unittest_pb2.TestAllTypes() ++ m.oneof_uint32 = 11 ++ m2 = unittest_pb2.TestAllTypes() ++ m2.ParseFromString(m.SerializeToString()) ++ self.assertEqual('oneof_uint32', m2.WhichOneof('oneof_field')) ++ ++ def testSortEmptyRepeatedCompositeContainer(self): ++ """Exercise a scenario that has led to segfaults in the past. ++ """ ++ m = unittest_pb2.TestAllTypes() ++ m.repeated_nested_message.sort() ++ ++ def testHasFieldOnRepeatedField(self): ++ """Using HasField on a repeated field should raise an exception. ++ """ ++ m = unittest_pb2.TestAllTypes() ++ with self.assertRaises(ValueError) as _: ++ m.HasField('repeated_int32') ++ ++ ++class ValidTypeNamesTest(basetest.TestCase): ++ ++ def assertImportFromName(self, msg, base_name): ++ # Parse to extra 'some.name' as a string. 
++ tp_name = str(type(msg)).split("'")[1] ++ valid_names = ('Repeated%sContainer' % base_name, ++ 'Repeated%sFieldContainer' % base_name) ++ self.assertTrue(any(tp_name.endswith(v) for v in valid_names), ++ '%r does end with any of %r' % (tp_name, valid_names)) ++ ++ parts = tp_name.split('.') ++ class_name = parts[-1] ++ module_name = '.'.join(parts[:-1]) ++ __import__(module_name, fromlist=[class_name]) ++ ++ def testTypeNamesCanBeImported(self): ++ # If import doesn't work, pickling won't work either. ++ pb = unittest_pb2.TestAllTypes() ++ self.assertImportFromName(pb.repeated_int32, 'Scalar') ++ self.assertImportFromName(pb.repeated_nested_message, 'Composite') ++ ++ ++if __name__ == '__main__': ++ basetest.main() +--- /dev/null ++++ protobuf-2.6.1/python3/google/protobuf/internal/missing_enum_values.proto +@@ -0,0 +1,50 @@ ++// Protocol Buffers - Google's data interchange format ++// Copyright 2008 Google Inc. All rights reserved. ++// https://developers.google.com/protocol-buffers/ ++// ++// Redistribution and use in source and binary forms, with or without ++// modification, are permitted provided that the following conditions are ++// met: ++// ++// * Redistributions of source code must retain the above copyright ++// notice, this list of conditions and the following disclaimer. ++// * Redistributions in binary form must reproduce the above ++// copyright notice, this list of conditions and the following disclaimer ++// in the documentation and/or other materials provided with the ++// distribution. ++// * Neither the name of Google Inc. nor the names of its ++// contributors may be used to endorse or promote products derived from ++// this software without specific prior written permission. ++// ++// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS ++// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT ++// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR ++// A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT ++// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, ++// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT ++// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, ++// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY ++// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT ++// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE ++// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. ++ ++package google.protobuf.python.internal; ++ ++message TestEnumValues { ++ enum NestedEnum { ++ ZERO = 0; ++ ONE = 1; ++ } ++ optional NestedEnum optional_nested_enum = 1; ++ repeated NestedEnum repeated_nested_enum = 2; ++ repeated NestedEnum packed_nested_enum = 3 [packed = true]; ++} ++ ++message TestMissingEnumValues { ++ enum NestedEnum { ++ TWO = 2; ++ } ++ optional NestedEnum optional_nested_enum = 1; ++ repeated NestedEnum repeated_nested_enum = 2; ++ repeated NestedEnum packed_nested_enum = 3 [packed = true]; ++} +--- /dev/null ++++ protobuf-2.6.1/python3/google/protobuf/internal/more_extensions.proto +@@ -0,0 +1,58 @@ ++// Protocol Buffers - Google's data interchange format ++// Copyright 2008 Google Inc. All rights reserved. ++// https://developers.google.com/protocol-buffers/ ++// ++// Redistribution and use in source and binary forms, with or without ++// modification, are permitted provided that the following conditions are ++// met: ++// ++// * Redistributions of source code must retain the above copyright ++// notice, this list of conditions and the following disclaimer. ++// * Redistributions in binary form must reproduce the above ++// copyright notice, this list of conditions and the following disclaimer ++// in the documentation and/or other materials provided with the ++// distribution. ++// * Neither the name of Google Inc. 
nor the names of its ++// contributors may be used to endorse or promote products derived from ++// this software without specific prior written permission. ++// ++// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS ++// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT ++// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR ++// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT ++// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, ++// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT ++// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, ++// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY ++// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT ++// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE ++// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. ++ ++// Author: robinson@google.com (Will Robinson) ++ ++ ++package google.protobuf.internal; ++ ++ ++message TopLevelMessage { ++ optional ExtendedMessage submessage = 1; ++} ++ ++ ++message ExtendedMessage { ++ extensions 1 to max; ++} ++ ++ ++message ForeignMessage { ++ optional int32 foreign_message_int = 1; ++} ++ ++ ++extend ExtendedMessage { ++ optional int32 optional_int_extension = 1; ++ optional ForeignMessage optional_message_extension = 2; ++ ++ repeated int32 repeated_int_extension = 3; ++ repeated ForeignMessage repeated_message_extension = 4; ++} +--- /dev/null ++++ protobuf-2.6.1/python3/google/protobuf/internal/more_extensions_dynamic.proto +@@ -0,0 +1,49 @@ ++// Protocol Buffers - Google's data interchange format ++// Copyright 2008 Google Inc. All rights reserved. 
++// https://developers.google.com/protocol-buffers/ ++// ++// Redistribution and use in source and binary forms, with or without ++// modification, are permitted provided that the following conditions are ++// met: ++// ++// * Redistributions of source code must retain the above copyright ++// notice, this list of conditions and the following disclaimer. ++// * Redistributions in binary form must reproduce the above ++// copyright notice, this list of conditions and the following disclaimer ++// in the documentation and/or other materials provided with the ++// distribution. ++// * Neither the name of Google Inc. nor the names of its ++// contributors may be used to endorse or promote products derived from ++// this software without specific prior written permission. ++// ++// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS ++// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT ++// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR ++// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT ++// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, ++// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT ++// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, ++// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY ++// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT ++// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE ++// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. ++ ++// Author: jasonh@google.com (Jason Hsueh) ++// ++// This file is used to test a corner case in the CPP implementation where the ++// generated C++ type is available for the extendee, but the extension is ++// defined in a file whose C++ type is not in the binary. 
++ ++ ++import "google/protobuf/internal/more_extensions.proto"; ++ ++package google.protobuf.internal; ++ ++message DynamicMessageType { ++ optional int32 a = 1; ++} ++ ++extend ExtendedMessage { ++ optional int32 dynamic_int32_extension = 100; ++ optional DynamicMessageType dynamic_message_extension = 101; ++} +--- /dev/null ++++ protobuf-2.6.1/python3/google/protobuf/internal/more_messages.proto +@@ -0,0 +1,51 @@ ++// Protocol Buffers - Google's data interchange format ++// Copyright 2008 Google Inc. All rights reserved. ++// https://developers.google.com/protocol-buffers/ ++// ++// Redistribution and use in source and binary forms, with or without ++// modification, are permitted provided that the following conditions are ++// met: ++// ++// * Redistributions of source code must retain the above copyright ++// notice, this list of conditions and the following disclaimer. ++// * Redistributions in binary form must reproduce the above ++// copyright notice, this list of conditions and the following disclaimer ++// in the documentation and/or other materials provided with the ++// distribution. ++// * Neither the name of Google Inc. nor the names of its ++// contributors may be used to endorse or promote products derived from ++// this software without specific prior written permission. ++// ++// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS ++// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT ++// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR ++// A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT ++// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, ++// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT ++// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, ++// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY ++// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT ++// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE ++// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. ++ ++// Author: robinson@google.com (Will Robinson) ++ ++ ++package google.protobuf.internal; ++ ++// A message where tag numbers are listed out of order, to allow us to test our ++// canonicalization of serialized output, which should always be in tag order. ++// We also mix in some extensions for extra fun. ++message OutOfOrderFields { ++ optional sint32 optional_sint32 = 5; ++ extensions 4 to 4; ++ optional uint32 optional_uint32 = 3; ++ extensions 2 to 2; ++ optional int32 optional_int32 = 1; ++}; ++ ++ ++extend OutOfOrderFields { ++ optional uint64 optional_uint64 = 4; ++ optional int64 optional_int64 = 2; ++} +--- /dev/null ++++ protobuf-2.6.1/python3/google/protobuf/internal/python_message.py +@@ -0,0 +1,1244 @@ ++# Protocol Buffers - Google's data interchange format ++# Copyright 2008 Google Inc. All rights reserved. ++# https://developers.google.com/protocol-buffers/ ++# ++# Redistribution and use in source and binary forms, with or without ++# modification, are permitted provided that the following conditions are ++# met: ++# ++# * Redistributions of source code must retain the above copyright ++# notice, this list of conditions and the following disclaimer. ++# * Redistributions in binary form must reproduce the above ++# copyright notice, this list of conditions and the following disclaimer ++# in the documentation and/or other materials provided with the ++# distribution. ++# * Neither the name of Google Inc. 
nor the names of its ++# contributors may be used to endorse or promote products derived from ++# this software without specific prior written permission. ++# ++# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS ++# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT ++# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR ++# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT ++# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, ++# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT ++# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, ++# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY ++# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT ++# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE ++# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. ++ ++# Copyright 2007 Google Inc. All Rights Reserved. ++# ++# This code is meant to work on Python 2.4 and above only. ++# ++# TODO(robinson): Helpers for verbose, common checks like seeing if a ++# descriptor's cpp_type is CPPTYPE_MESSAGE. ++ ++"""Contains a metaclass and helper functions used to create ++protocol message classes from Descriptor objects at runtime. ++ ++Recall that a metaclass is the "type" of a class. ++(A class is to a metaclass what an instance is to a class.) ++ ++In this case, we use the GeneratedProtocolMessageType metaclass ++to inject all the useful functionality into the classes ++output by the protocol compiler at compile-time. ++ ++The upshot of all this is that the real implementation ++details for ALL pure-Python protocol buffers are *here in ++this file*. 
++""" ++ ++__author__ = 'robinson@google.com (Will Robinson)' ++ ++from io import BytesIO ++import sys ++import struct ++import weakref ++ ++import six ++import six.moves.copyreg as copyreg ++ ++# We use "as" to avoid name collisions with variables. ++from google.protobuf.internal import containers ++from google.protobuf.internal import decoder ++from google.protobuf.internal import encoder ++from google.protobuf.internal import enum_type_wrapper ++from google.protobuf.internal import message_listener as message_listener_mod ++from google.protobuf.internal import type_checkers ++from google.protobuf.internal import wire_format ++from google.protobuf import descriptor as descriptor_mod ++from google.protobuf import message as message_mod ++from google.protobuf import text_format ++ ++_FieldDescriptor = descriptor_mod.FieldDescriptor ++ ++ ++def NewMessage(bases, descriptor, dictionary): ++ _AddClassAttributesForNestedExtensions(descriptor, dictionary) ++ _AddSlots(descriptor, dictionary) ++ return bases ++ ++ ++def InitMessage(descriptor, cls): ++ cls._decoders_by_tag = {} ++ cls._extensions_by_name = {} ++ cls._extensions_by_number = {} ++ if (descriptor.has_options and ++ descriptor.GetOptions().message_set_wire_format): ++ cls._decoders_by_tag[decoder.MESSAGE_SET_ITEM_TAG] = ( ++ decoder.MessageSetItemDecoder(cls._extensions_by_number), None) ++ ++ # Attach stuff to each FieldDescriptor for quick lookup later on. ++ for field in descriptor.fields: ++ _AttachFieldHelpers(cls, field) ++ ++ _AddEnumValues(descriptor, cls) ++ _AddInitMethod(descriptor, cls) ++ _AddPropertiesForFields(descriptor, cls) ++ _AddPropertiesForExtensions(descriptor, cls) ++ _AddStaticMethods(cls) ++ _AddMessageMethods(descriptor, cls) ++ _AddPrivateHelperMethods(descriptor, cls) ++ copyreg.pickle(cls, lambda obj: (cls, (), obj.__getstate__())) ++ ++ ++# Stateless helpers for GeneratedProtocolMessageType below. ++# Outside clients should not access these directly. 
++# ++# I opted not to make any of these methods on the metaclass, to make it more ++# clear that I'm not really using any state there and to keep clients from ++# thinking that they have direct access to these construction helpers. ++ ++ ++def _PropertyName(proto_field_name): ++ """Returns the name of the public property attribute which ++ clients can use to get and (in some cases) set the value ++ of a protocol message field. ++ ++ Args: ++ proto_field_name: The protocol message field name, exactly ++ as it appears (or would appear) in a .proto file. ++ """ ++ # TODO(robinson): Escape Python keywords (e.g., yield), and test this support. ++ # nnorwitz makes my day by writing: ++ # """ ++ # FYI. See the keyword module in the stdlib. This could be as simple as: ++ # ++ # if keyword.iskeyword(proto_field_name): ++ # return proto_field_name + "_" ++ # return proto_field_name ++ # """ ++ # Kenton says: The above is a BAD IDEA. People rely on being able to use ++ # getattr() and setattr() to reflectively manipulate field values. If we ++ # rename the properties, then every such user has to also make sure to apply ++ # the same transformation. Note that currently if you name a field "yield", ++ # you can still access it just fine using getattr/setattr -- it's not even ++ # that cumbersome to do so. ++ # TODO(kenton): Remove this method entirely if/when everyone agrees with my ++ # position. ++ return proto_field_name ++ ++ ++def _VerifyExtensionHandle(message, extension_handle): ++ """Verify that the given extension handle is valid.""" ++ ++ if not isinstance(extension_handle, _FieldDescriptor): ++ raise KeyError('HasExtension() expects an extension handle, got: %s' % ++ extension_handle) ++ ++ if not extension_handle.is_extension: ++ raise KeyError('"%s" is not an extension.' % extension_handle.full_name) ++ ++ if not extension_handle.containing_type: ++ raise KeyError('"%s" is missing a containing_type.' 
++ % extension_handle.full_name) ++ ++ if extension_handle.containing_type is not message.DESCRIPTOR: ++ raise KeyError('Extension "%s" extends message type "%s", but this ' ++ 'message is of type "%s".' % ++ (extension_handle.full_name, ++ extension_handle.containing_type.full_name, ++ message.DESCRIPTOR.full_name)) ++ ++ ++def _AddSlots(message_descriptor, dictionary): ++ """Adds a __slots__ entry to dictionary, containing the names of all valid ++ attributes for this message type. ++ ++ Args: ++ message_descriptor: A Descriptor instance describing this message type. ++ dictionary: Class dictionary to which we'll add a '__slots__' entry. ++ """ ++ dictionary['__slots__'] = ['_cached_byte_size', ++ '_cached_byte_size_dirty', ++ '_fields', ++ '_unknown_fields', ++ '_is_present_in_parent', ++ '_listener', ++ '_listener_for_children', ++ '__weakref__', ++ '_oneofs'] ++ ++ ++def _IsMessageSetExtension(field): ++ return (field.is_extension and ++ field.containing_type.has_options and ++ field.containing_type.GetOptions().message_set_wire_format and ++ field.type == _FieldDescriptor.TYPE_MESSAGE and ++ field.message_type == field.extension_scope and ++ field.label == _FieldDescriptor.LABEL_OPTIONAL) ++ ++ ++def _AttachFieldHelpers(cls, field_descriptor): ++ is_repeated = (field_descriptor.label == _FieldDescriptor.LABEL_REPEATED) ++ is_packed = (field_descriptor.has_options and ++ field_descriptor.GetOptions().packed) ++ ++ if _IsMessageSetExtension(field_descriptor): ++ field_encoder = encoder.MessageSetItemEncoder(field_descriptor.number) ++ sizer = encoder.MessageSetItemSizer(field_descriptor.number) ++ else: ++ field_encoder = type_checkers.TYPE_TO_ENCODER[field_descriptor.type]( ++ field_descriptor.number, is_repeated, is_packed) ++ sizer = type_checkers.TYPE_TO_SIZER[field_descriptor.type]( ++ field_descriptor.number, is_repeated, is_packed) ++ ++ field_descriptor._encoder = field_encoder ++ field_descriptor._sizer = sizer ++ field_descriptor._default_constructor 
= _DefaultValueConstructorForField( ++ field_descriptor) ++ ++ def AddDecoder(wiretype, is_packed): ++ tag_bytes = encoder.TagBytes(field_descriptor.number, wiretype) ++ cls._decoders_by_tag[tag_bytes] = ( ++ type_checkers.TYPE_TO_DECODER[field_descriptor.type]( ++ field_descriptor.number, is_repeated, is_packed, ++ field_descriptor, field_descriptor._default_constructor), ++ field_descriptor if field_descriptor.containing_oneof is not None ++ else None) ++ ++ AddDecoder(type_checkers.FIELD_TYPE_TO_WIRE_TYPE[field_descriptor.type], ++ False) ++ ++ if is_repeated and wire_format.IsTypePackable(field_descriptor.type): ++ # To support wire compatibility of adding packed = true, add a decoder for ++ # packed values regardless of the field's options. ++ AddDecoder(wire_format.WIRETYPE_LENGTH_DELIMITED, True) ++ ++ ++def _AddClassAttributesForNestedExtensions(descriptor, dictionary): ++ extension_dict = descriptor.extensions_by_name ++ for extension_name, extension_field in extension_dict.items(): ++ assert extension_name not in dictionary ++ dictionary[extension_name] = extension_field ++ ++ ++def _AddEnumValues(descriptor, cls): ++ """Sets class-level attributes for all enum fields defined in this message. ++ ++ Also exporting a class-level object that can name enum values. ++ ++ Args: ++ descriptor: Descriptor object for this message type. ++ cls: Class we're constructing for this message type. ++ """ ++ for enum_type in descriptor.enum_types: ++ setattr(cls, enum_type.name, enum_type_wrapper.EnumTypeWrapper(enum_type)) ++ for enum_value in enum_type.values: ++ setattr(cls, enum_value.name, enum_value.number) ++ ++ ++def _DefaultValueConstructorForField(field): ++ """Returns a function which returns a default value for a field. ++ ++ Args: ++ field: FieldDescriptor object for this field. ++ ++ The returned function has one argument: ++ message: Message instance containing this field, or a weakref proxy ++ of same. 
++ ++ That function in turn returns a default value for this field. The default ++ value may refer back to |message| via a weak reference. ++ """ ++ ++ if field.label == _FieldDescriptor.LABEL_REPEATED: ++ if field.has_default_value and field.default_value != []: ++ raise ValueError('Repeated field default value not empty list: %s' % ( ++ field.default_value)) ++ if field.cpp_type == _FieldDescriptor.CPPTYPE_MESSAGE: ++ # We can't look at _concrete_class yet since it might not have ++ # been set. (Depends on order in which we initialize the classes). ++ message_type = field.message_type ++ def MakeRepeatedMessageDefault(message): ++ return containers.RepeatedCompositeFieldContainer( ++ message._listener_for_children, field.message_type) ++ return MakeRepeatedMessageDefault ++ else: ++ type_checker = type_checkers.GetTypeChecker(field) ++ def MakeRepeatedScalarDefault(message): ++ return containers.RepeatedScalarFieldContainer( ++ message._listener_for_children, type_checker) ++ return MakeRepeatedScalarDefault ++ ++ if field.cpp_type == _FieldDescriptor.CPPTYPE_MESSAGE: ++ # _concrete_class may not yet be initialized. ++ message_type = field.message_type ++ def MakeSubMessageDefault(message): ++ result = message_type._concrete_class() ++ result._SetListener(message._listener_for_children) ++ return result ++ return MakeSubMessageDefault ++ ++ def MakeScalarDefault(message): ++ # TODO(protobuf-team): This may be broken since there may not be ++ # default_value. Combine with has_default_value somehow. ++ return field.default_value ++ return MakeScalarDefault ++ ++ ++def _AddInitMethod(message_descriptor, cls): ++ """Adds an __init__ method to cls.""" ++ fields = message_descriptor.fields ++ def init(self, **kwargs): ++ self._cached_byte_size = 0 ++ self._cached_byte_size_dirty = len(kwargs) > 0 ++ self._fields = {} ++ # Contains a mapping from oneof field descriptors to the descriptor ++ # of the currently set field in that oneof field. 
++ self._oneofs = {} ++ ++ # _unknown_fields is () when empty for efficiency, and will be turned into ++ # a list if fields are added. ++ self._unknown_fields = () ++ self._is_present_in_parent = False ++ self._listener = message_listener_mod.NullMessageListener() ++ self._listener_for_children = _Listener(self) ++ for field_name, field_value in kwargs.items(): ++ field = _GetFieldByName(message_descriptor, field_name) ++ if field is None: ++ raise TypeError("%s() got an unexpected keyword argument '%s'" % ++ (message_descriptor.name, field_name)) ++ if field.label == _FieldDescriptor.LABEL_REPEATED: ++ copy = field._default_constructor(self) ++ if field.cpp_type == _FieldDescriptor.CPPTYPE_MESSAGE: # Composite ++ for val in field_value: ++ copy.add().MergeFrom(val) ++ else: # Scalar ++ copy.extend(field_value) ++ self._fields[field] = copy ++ elif field.cpp_type == _FieldDescriptor.CPPTYPE_MESSAGE: ++ copy = field._default_constructor(self) ++ copy.MergeFrom(field_value) ++ self._fields[field] = copy ++ else: ++ setattr(self, field_name, field_value) ++ ++ init.__module__ = None ++ init.__doc__ = None ++ cls.__init__ = init ++ ++ ++def _GetFieldByName(message_descriptor, field_name): ++ """Returns a field descriptor by field name. ++ ++ Args: ++ message_descriptor: A Descriptor describing all fields in message. ++ field_name: The name of the field to retrieve. ++ Returns: ++ The field descriptor associated with the field name. ++ """ ++ try: ++ return message_descriptor.fields_by_name[field_name] ++ except KeyError: ++ raise ValueError('Protocol message has no "%s" field.' % field_name) ++ ++ ++def _AddPropertiesForFields(descriptor, cls): ++ """Adds properties for all fields in this protocol message type.""" ++ for field in descriptor.fields: ++ _AddPropertiesForField(field, cls) ++ ++ if descriptor.is_extendable: ++ # _ExtensionDict is just an adaptor with no state so we allocate a new one ++ # every time it is accessed. 
++ cls.Extensions = property(lambda self: _ExtensionDict(self)) ++ ++ ++def _AddPropertiesForField(field, cls): ++ """Adds a public property for a protocol message field. ++ Clients can use this property to get and (in the case ++ of non-repeated scalar fields) directly set the value ++ of a protocol message field. ++ ++ Args: ++ field: A FieldDescriptor for this field. ++ cls: The class we're constructing. ++ """ ++ # Catch it if we add other types that we should ++ # handle specially here. ++ assert _FieldDescriptor.MAX_CPPTYPE == 10 ++ ++ constant_name = field.name.upper() + "_FIELD_NUMBER" ++ setattr(cls, constant_name, field.number) ++ ++ if field.label == _FieldDescriptor.LABEL_REPEATED: ++ _AddPropertiesForRepeatedField(field, cls) ++ elif field.cpp_type == _FieldDescriptor.CPPTYPE_MESSAGE: ++ _AddPropertiesForNonRepeatedCompositeField(field, cls) ++ else: ++ _AddPropertiesForNonRepeatedScalarField(field, cls) ++ ++ ++def _AddPropertiesForRepeatedField(field, cls): ++ """Adds a public property for a "repeated" protocol message field. Clients ++ can use this property to get the value of the field, which will be either a ++ _RepeatedScalarFieldContainer or _RepeatedCompositeFieldContainer (see ++ below). ++ ++ Note that when clients add values to these containers, we perform ++ type-checking in the case of repeated scalar fields, and we also set any ++ necessary "has" bits as a side-effect. ++ ++ Args: ++ field: A FieldDescriptor for this field. ++ cls: The class we're constructing. ++ """ ++ proto_field_name = field.name ++ property_name = _PropertyName(proto_field_name) ++ ++ def getter(self): ++ field_value = self._fields.get(field) ++ if field_value is None: ++ # Construct a new object to represent this field. ++ field_value = field._default_constructor(self) ++ ++ # Atomically check if another thread has preempted us and, if not, swap ++ # in the new object we just created. If someone has preempted us, we ++ # take that object and discard ours. 
++ # WARNING: We are relying on setdefault() being atomic. This is true ++ # in CPython but we haven't investigated others. This warning appears ++ # in several other locations in this file. ++ field_value = self._fields.setdefault(field, field_value) ++ return field_value ++ getter.__module__ = None ++ getter.__doc__ = 'Getter for %s.' % proto_field_name ++ ++ # We define a setter just so we can throw an exception with a more ++ # helpful error message. ++ def setter(self, new_value): ++ raise AttributeError('Assignment not allowed to repeated field ' ++ '"%s" in protocol message object.' % proto_field_name) ++ ++ doc = 'Magic attribute generated for "%s" proto field.' % proto_field_name ++ setattr(cls, property_name, property(getter, setter, doc=doc)) ++ ++ ++def _AddPropertiesForNonRepeatedScalarField(field, cls): ++ """Adds a public property for a nonrepeated, scalar protocol message field. ++ Clients can use this property to get and directly set the value of the field. ++ Note that when the client sets the value of a field by using this property, ++ all necessary "has" bits are set as a side-effect, and we also perform ++ type-checking. ++ ++ Args: ++ field: A FieldDescriptor for this field. ++ cls: The class we're constructing. ++ """ ++ proto_field_name = field.name ++ property_name = _PropertyName(proto_field_name) ++ type_checker = type_checkers.GetTypeChecker(field) ++ default_value = field.default_value ++ valid_values = set() ++ ++ def getter(self): ++ # TODO(protobuf-team): This may be broken since there may not be ++ # default_value. Combine with has_default_value somehow. ++ return self._fields.get(field, default_value) ++ getter.__module__ = None ++ getter.__doc__ = 'Getter for %s.' % proto_field_name ++ def field_setter(self, new_value): ++ # pylint: disable=protected-access ++ self._fields[field] = type_checker.CheckValue(new_value) ++ # Check _cached_byte_size_dirty inline to improve performance, since scalar ++ # setters are called frequently. 
++ if not self._cached_byte_size_dirty: ++ self._Modified() ++ ++ if field.containing_oneof is not None: ++ def setter(self, new_value): ++ field_setter(self, new_value) ++ self._UpdateOneofState(field) ++ else: ++ setter = field_setter ++ ++ setter.__module__ = None ++ setter.__doc__ = 'Setter for %s.' % proto_field_name ++ ++ # Add a property to encapsulate the getter/setter. ++ doc = 'Magic attribute generated for "%s" proto field.' % proto_field_name ++ setattr(cls, property_name, property(getter, setter, doc=doc)) ++ ++ ++def _AddPropertiesForNonRepeatedCompositeField(field, cls): ++ """Adds a public property for a nonrepeated, composite protocol message field. ++ A composite field is a "group" or "message" field. ++ ++ Clients can use this property to get the value of the field, but cannot ++ assign to the property directly. ++ ++ Args: ++ field: A FieldDescriptor for this field. ++ cls: The class we're constructing. ++ """ ++ # TODO(robinson): Remove duplication with similar method ++ # for non-repeated scalars. ++ proto_field_name = field.name ++ property_name = _PropertyName(proto_field_name) ++ ++ # TODO(komarek): Can anyone explain to me why we cache the message_type this ++ # way, instead of referring to field.message_type inside of getter(self)? ++ # What if someone sets message_type later on (which makes for simpler ++ # dyanmic proto descriptor and class creation code). ++ message_type = field.message_type ++ ++ def getter(self): ++ field_value = self._fields.get(field) ++ if field_value is None: ++ # Construct a new object to represent this field. ++ field_value = message_type._concrete_class() # use field.message_type? ++ field_value._SetListener( ++ _OneofListener(self, field) ++ if field.containing_oneof is not None ++ else self._listener_for_children) ++ ++ # Atomically check if another thread has preempted us and, if not, swap ++ # in the new object we just created. If someone has preempted us, we ++ # take that object and discard ours. 
++ # WARNING: We are relying on setdefault() being atomic. This is true ++ # in CPython but we haven't investigated others. This warning appears ++ # in several other locations in this file. ++ field_value = self._fields.setdefault(field, field_value) ++ return field_value ++ getter.__module__ = None ++ getter.__doc__ = 'Getter for %s.' % proto_field_name ++ ++ # We define a setter just so we can throw an exception with a more ++ # helpful error message. ++ def setter(self, new_value): ++ raise AttributeError('Assignment not allowed to composite field ' ++ '"%s" in protocol message object.' % proto_field_name) ++ ++ # Add a property to encapsulate the getter. ++ doc = 'Magic attribute generated for "%s" proto field.' % proto_field_name ++ setattr(cls, property_name, property(getter, setter, doc=doc)) ++ ++ ++def _AddPropertiesForExtensions(descriptor, cls): ++ """Adds properties for all fields in this protocol message type.""" ++ extension_dict = descriptor.extensions_by_name ++ for extension_name, extension_field in extension_dict.items(): ++ constant_name = extension_name.upper() + "_FIELD_NUMBER" ++ setattr(cls, constant_name, extension_field.number) ++ ++ ++def _AddStaticMethods(cls): ++ # TODO(robinson): This probably needs to be thread-safe(?) ++ def RegisterExtension(extension_handle): ++ extension_handle.containing_type = cls.DESCRIPTOR ++ _AttachFieldHelpers(cls, extension_handle) ++ ++ # Try to insert our extension, failing if an extension with the same number ++ # already exists. ++ actual_handle = cls._extensions_by_number.setdefault( ++ extension_handle.number, extension_handle) ++ if actual_handle is not extension_handle: ++ raise AssertionError( ++ 'Extensions "%s" and "%s" both try to extend message type "%s" with ' ++ 'field number %d.' 
% ++ (extension_handle.full_name, actual_handle.full_name, ++ cls.DESCRIPTOR.full_name, extension_handle.number)) ++ ++ cls._extensions_by_name[extension_handle.full_name] = extension_handle ++ ++ handle = extension_handle # avoid line wrapping ++ if _IsMessageSetExtension(handle): ++ # MessageSet extension. Also register under type name. ++ cls._extensions_by_name[ ++ extension_handle.message_type.full_name] = extension_handle ++ ++ cls.RegisterExtension = staticmethod(RegisterExtension) ++ ++ def FromString(s): ++ message = cls() ++ message.MergeFromString(s) ++ return message ++ cls.FromString = staticmethod(FromString) ++ ++ ++def _IsPresent(item): ++ """Given a (FieldDescriptor, value) tuple from _fields, return true if the ++ value should be included in the list returned by ListFields().""" ++ ++ if item[0].label == _FieldDescriptor.LABEL_REPEATED: ++ return bool(item[1]) ++ elif item[0].cpp_type == _FieldDescriptor.CPPTYPE_MESSAGE: ++ return item[1]._is_present_in_parent ++ else: ++ return True ++ ++ ++def _AddListFieldsMethod(message_descriptor, cls): ++ """Helper for _AddMessageMethods().""" ++ ++ def ListFields(self): ++ all_fields = [item for item in self._fields.items() if _IsPresent(item)] ++ all_fields.sort(key = lambda item: item[0].number) ++ return all_fields ++ ++ cls.ListFields = ListFields ++ ++ ++def _AddHasFieldMethod(message_descriptor, cls): ++ """Helper for _AddMessageMethods().""" ++ ++ singular_fields = {} ++ for field in message_descriptor.fields: ++ if field.label != _FieldDescriptor.LABEL_REPEATED: ++ singular_fields[field.name] = field ++ # Fields inside oneofs are never repeated (enforced by the compiler). ++ for field in message_descriptor.oneofs: ++ singular_fields[field.name] = field ++ ++ def HasField(self, field_name): ++ try: ++ field = singular_fields[field_name] ++ except KeyError: ++ raise ValueError( ++ 'Protocol message has no singular "%s" field.' 
% field_name) ++ ++ if isinstance(field, descriptor_mod.OneofDescriptor): ++ try: ++ return HasField(self, self._oneofs[field].name) ++ except KeyError: ++ return False ++ else: ++ if field.cpp_type == _FieldDescriptor.CPPTYPE_MESSAGE: ++ value = self._fields.get(field) ++ return value is not None and value._is_present_in_parent ++ else: ++ return field in self._fields ++ ++ cls.HasField = HasField ++ ++ ++def _AddClearFieldMethod(message_descriptor, cls): ++ """Helper for _AddMessageMethods().""" ++ def ClearField(self, field_name): ++ try: ++ field = message_descriptor.fields_by_name[field_name] ++ except KeyError: ++ try: ++ field = message_descriptor.oneofs_by_name[field_name] ++ if field in self._oneofs: ++ field = self._oneofs[field] ++ else: ++ return ++ except KeyError: ++ raise ValueError('Protocol message has no "%s" field.' % field_name) ++ ++ if field in self._fields: ++ # Note: If the field is a sub-message, its listener will still point ++ # at us. That's fine, because the worst than can happen is that it ++ # will call _Modified() and invalidate our byte size. Big deal. ++ del self._fields[field] ++ ++ if self._oneofs.get(field.containing_oneof, None) is field: ++ del self._oneofs[field.containing_oneof] ++ ++ # Always call _Modified() -- even if nothing was changed, this is ++ # a mutating method, and thus calling it should cause the field to become ++ # present in the parent message. ++ self._Modified() ++ ++ cls.ClearField = ClearField ++ ++ ++def _AddClearExtensionMethod(cls): ++ """Helper for _AddMessageMethods().""" ++ def ClearExtension(self, extension_handle): ++ _VerifyExtensionHandle(self, extension_handle) ++ ++ # Similar to ClearField(), above. ++ if extension_handle in self._fields: ++ del self._fields[extension_handle] ++ self._Modified() ++ cls.ClearExtension = ClearExtension ++ ++ ++def _AddClearMethod(message_descriptor, cls): ++ """Helper for _AddMessageMethods().""" ++ def Clear(self): ++ # Clear fields. 
++ self._fields = {} ++ self._unknown_fields = () ++ self._Modified() ++ cls.Clear = Clear ++ ++ ++def _AddHasExtensionMethod(cls): ++ """Helper for _AddMessageMethods().""" ++ def HasExtension(self, extension_handle): ++ _VerifyExtensionHandle(self, extension_handle) ++ if extension_handle.label == _FieldDescriptor.LABEL_REPEATED: ++ raise KeyError('"%s" is repeated.' % extension_handle.full_name) ++ ++ if extension_handle.cpp_type == _FieldDescriptor.CPPTYPE_MESSAGE: ++ value = self._fields.get(extension_handle) ++ return value is not None and value._is_present_in_parent ++ else: ++ return extension_handle in self._fields ++ cls.HasExtension = HasExtension ++ ++ ++def _AddEqualsMethod(message_descriptor, cls): ++ """Helper for _AddMessageMethods().""" ++ def __eq__(self, other): ++ if (not isinstance(other, message_mod.Message) or ++ other.DESCRIPTOR != self.DESCRIPTOR): ++ return False ++ ++ if self is other: ++ return True ++ ++ if not self.ListFields() == other.ListFields(): ++ return False ++ ++ # Sort unknown fields because their order shouldn't affect equality test. 
++ unknown_fields = list(self._unknown_fields) ++ unknown_fields.sort() ++ other_unknown_fields = list(other._unknown_fields) ++ other_unknown_fields.sort() ++ ++ return unknown_fields == other_unknown_fields ++ ++ cls.__eq__ = __eq__ ++ ++ ++def _AddStrMethod(message_descriptor, cls): ++ """Helper for _AddMessageMethods().""" ++ def __str__(self): ++ return text_format.MessageToString(self) ++ cls.__str__ = __str__ ++ ++ ++def _AddUnicodeMethod(unused_message_descriptor, cls): ++ """Helper for _AddMessageMethods().""" ++ ++ def __unicode__(self): ++ return text_format.MessageToString(self, as_utf8=True).decode('utf-8') ++ cls.__unicode__ = __unicode__ ++ ++ ++def _AddSetListenerMethod(cls): ++ """Helper for _AddMessageMethods().""" ++ def SetListener(self, listener): ++ if listener is None: ++ self._listener = message_listener_mod.NullMessageListener() ++ else: ++ self._listener = listener ++ cls._SetListener = SetListener ++ ++ ++def _BytesForNonRepeatedElement(value, field_number, field_type): ++ """Returns the number of bytes needed to serialize a non-repeated element. ++ The returned byte count includes space for tag information and any ++ other additional space associated with serializing value. ++ ++ Args: ++ value: Value we're serializing. ++ field_number: Field number of this value. (Since the field number ++ is stored as part of a varint-encoded tag, this has an impact ++ on the total bytes required to serialize the value). ++ field_type: The type of the field. One of the TYPE_* constants ++ within FieldDescriptor. 
++ """ ++ try: ++ fn = type_checkers.TYPE_TO_BYTE_SIZE_FN[field_type] ++ return fn(field_number, value) ++ except KeyError: ++ raise message_mod.EncodeError('Unrecognized field type: %d' % field_type) ++ ++ ++def _AddByteSizeMethod(message_descriptor, cls): ++ """Helper for _AddMessageMethods().""" ++ ++ def ByteSize(self): ++ if not self._cached_byte_size_dirty: ++ return self._cached_byte_size ++ ++ size = 0 ++ for field_descriptor, field_value in self.ListFields(): ++ size += field_descriptor._sizer(field_value) ++ ++ for tag_bytes, value_bytes in self._unknown_fields: ++ size += len(tag_bytes) + len(value_bytes) ++ ++ self._cached_byte_size = size ++ self._cached_byte_size_dirty = False ++ self._listener_for_children.dirty = False ++ return size ++ ++ cls.ByteSize = ByteSize ++ ++ ++def _AddSerializeToStringMethod(message_descriptor, cls): ++ """Helper for _AddMessageMethods().""" ++ ++ def SerializeToString(self): ++ # Check if the message has all of its required fields set. ++ errors = [] ++ if not self.IsInitialized(): ++ raise message_mod.EncodeError( ++ 'Message %s is missing required fields: %s' % ( ++ self.DESCRIPTOR.full_name, ','.join(self.FindInitializationErrors()))) ++ return self.SerializePartialToString() ++ cls.SerializeToString = SerializeToString ++ ++ ++def _AddSerializePartialToStringMethod(message_descriptor, cls): ++ """Helper for _AddMessageMethods().""" ++ ++ def SerializePartialToString(self): ++ out = BytesIO() ++ self._InternalSerialize(out.write) ++ return out.getvalue() ++ cls.SerializePartialToString = SerializePartialToString ++ ++ def InternalSerialize(self, write_bytes): ++ for field_descriptor, field_value in self.ListFields(): ++ field_descriptor._encoder(write_bytes, field_value) ++ for tag_bytes, value_bytes in self._unknown_fields: ++ write_bytes(tag_bytes) ++ write_bytes(value_bytes) ++ cls._InternalSerialize = InternalSerialize ++ ++ ++def _AddMergeFromStringMethod(message_descriptor, cls): ++ """Helper for 
_AddMessageMethods().""" ++ def MergeFromString(self, serialized): ++ length = len(serialized) ++ try: ++ if self._InternalParse(serialized, 0, length) != length: ++ # The only reason _InternalParse would return early is if it ++ # encountered an end-group tag. ++ raise message_mod.DecodeError('Unexpected end-group tag.') ++ except (IndexError, TypeError): ++ # Now ord(buf[p:p+1]) == ord('') gets TypeError. ++ raise message_mod.DecodeError('Truncated message.') ++ except struct.error as e: ++ raise message_mod.DecodeError(e) ++ return length # Return this for legacy reasons. ++ cls.MergeFromString = MergeFromString ++ ++ local_ReadTag = decoder.ReadTag ++ local_SkipField = decoder.SkipField ++ decoders_by_tag = cls._decoders_by_tag ++ ++ def InternalParse(self, buffer, pos, end): ++ self._Modified() ++ field_dict = self._fields ++ unknown_field_list = self._unknown_fields ++ while pos != end: ++ (tag_bytes, new_pos) = local_ReadTag(buffer, pos) ++ field_decoder, field_desc = decoders_by_tag.get(tag_bytes, (None, None)) ++ if field_decoder is None: ++ value_start_pos = new_pos ++ new_pos = local_SkipField(buffer, new_pos, end, tag_bytes) ++ if new_pos == -1: ++ return pos ++ if not unknown_field_list: ++ unknown_field_list = self._unknown_fields = [] ++ unknown_field_list.append((tag_bytes, buffer[value_start_pos:new_pos])) ++ pos = new_pos ++ else: ++ pos = field_decoder(buffer, new_pos, end, self, field_dict) ++ if field_desc: ++ self._UpdateOneofState(field_desc) ++ return pos ++ cls._InternalParse = InternalParse ++ ++ ++def _AddIsInitializedMethod(message_descriptor, cls): ++ """Adds the IsInitialized and FindInitializationError methods to the ++ protocol message class.""" ++ ++ required_fields = [field for field in message_descriptor.fields ++ if field.label == _FieldDescriptor.LABEL_REQUIRED] ++ ++ def IsInitialized(self, errors=None): ++ """Checks if all required fields of a message are set. 
++ ++ Args: ++ errors: A list which, if provided, will be populated with the field ++ paths of all missing required fields. ++ ++ Returns: ++ True iff the specified message has all required fields set. ++ """ ++ ++ # Performance is critical so we avoid HasField() and ListFields(). ++ ++ for field in required_fields: ++ if (field not in self._fields or ++ (field.cpp_type == _FieldDescriptor.CPPTYPE_MESSAGE and ++ not self._fields[field]._is_present_in_parent)): ++ if errors is not None: ++ errors.extend(self.FindInitializationErrors()) ++ return False ++ ++ for field, value in list(self._fields.items()): # dict can change size! ++ if field.cpp_type == _FieldDescriptor.CPPTYPE_MESSAGE: ++ if field.label == _FieldDescriptor.LABEL_REPEATED: ++ for element in value: ++ if not element.IsInitialized(): ++ if errors is not None: ++ errors.extend(self.FindInitializationErrors()) ++ return False ++ elif value._is_present_in_parent and not value.IsInitialized(): ++ if errors is not None: ++ errors.extend(self.FindInitializationErrors()) ++ return False ++ ++ return True ++ ++ cls.IsInitialized = IsInitialized ++ ++ def FindInitializationErrors(self): ++ """Finds required fields which are not initialized. ++ ++ Returns: ++ A list of strings. Each string is a path to an uninitialized field from ++ the top-level message, e.g. "foo.bar[5].baz". ++ """ ++ ++ errors = [] # simplify things ++ ++ for field in required_fields: ++ if not self.HasField(field.name): ++ errors.append(field.name) ++ ++ for field, value in self.ListFields(): ++ if field.cpp_type == _FieldDescriptor.CPPTYPE_MESSAGE: ++ if field.is_extension: ++ name = "(%s)" % field.full_name ++ else: ++ name = field.name ++ ++ if field.label == _FieldDescriptor.LABEL_REPEATED: ++ for i in range(len(value)): ++ element = value[i] ++ prefix = "%s[%d]." % (name, i) ++ sub_errors = element.FindInitializationErrors() ++ errors += [ prefix + error for error in sub_errors ] ++ else: ++ prefix = name + "." 
++ sub_errors = value.FindInitializationErrors() ++ errors += [ prefix + error for error in sub_errors ] ++ ++ return errors ++ ++ cls.FindInitializationErrors = FindInitializationErrors ++ ++ ++def _AddMergeFromMethod(cls): ++ LABEL_REPEATED = _FieldDescriptor.LABEL_REPEATED ++ CPPTYPE_MESSAGE = _FieldDescriptor.CPPTYPE_MESSAGE ++ ++ def MergeFrom(self, msg): ++ if not isinstance(msg, cls): ++ raise TypeError( ++ "Parameter to MergeFrom() must be instance of same class: " ++ "expected %s got %s." % (cls.__name__, type(msg).__name__)) ++ ++ assert msg is not self ++ self._Modified() ++ ++ fields = self._fields ++ ++ for field, value in msg._fields.items(): ++ if field.label == LABEL_REPEATED: ++ field_value = fields.get(field) ++ if field_value is None: ++ # Construct a new object to represent this field. ++ field_value = field._default_constructor(self) ++ fields[field] = field_value ++ field_value.MergeFrom(value) ++ elif field.cpp_type == CPPTYPE_MESSAGE: ++ if value._is_present_in_parent: ++ field_value = fields.get(field) ++ if field_value is None: ++ # Construct a new object to represent this field. ++ field_value = field._default_constructor(self) ++ fields[field] = field_value ++ field_value.MergeFrom(value) ++ else: ++ self._fields[field] = value ++ ++ if msg._unknown_fields: ++ if not self._unknown_fields: ++ self._unknown_fields = [] ++ self._unknown_fields.extend(msg._unknown_fields) ++ ++ cls.MergeFrom = MergeFrom ++ ++ ++def _AddWhichOneofMethod(message_descriptor, cls): ++ def WhichOneof(self, oneof_name): ++ """Returns the name of the currently set field inside a oneof, or None.""" ++ try: ++ field = message_descriptor.oneofs_by_name[oneof_name] ++ except KeyError: ++ raise ValueError( ++ 'Protocol message has no oneof "%s" field.' 
% oneof_name) ++ ++ nested_field = self._oneofs.get(field, None) ++ if nested_field is not None and self.HasField(nested_field.name): ++ return nested_field.name ++ else: ++ return None ++ ++ cls.WhichOneof = WhichOneof ++ ++ ++def _AddMessageMethods(message_descriptor, cls): ++ """Adds implementations of all Message methods to cls.""" ++ _AddListFieldsMethod(message_descriptor, cls) ++ _AddHasFieldMethod(message_descriptor, cls) ++ _AddClearFieldMethod(message_descriptor, cls) ++ if message_descriptor.is_extendable: ++ _AddClearExtensionMethod(cls) ++ _AddHasExtensionMethod(cls) ++ _AddClearMethod(message_descriptor, cls) ++ _AddEqualsMethod(message_descriptor, cls) ++ _AddStrMethod(message_descriptor, cls) ++ _AddUnicodeMethod(message_descriptor, cls) ++ _AddSetListenerMethod(cls) ++ _AddByteSizeMethod(message_descriptor, cls) ++ _AddSerializeToStringMethod(message_descriptor, cls) ++ _AddSerializePartialToStringMethod(message_descriptor, cls) ++ _AddMergeFromStringMethod(message_descriptor, cls) ++ _AddIsInitializedMethod(message_descriptor, cls) ++ _AddMergeFromMethod(cls) ++ _AddWhichOneofMethod(message_descriptor, cls) ++ ++def _AddPrivateHelperMethods(message_descriptor, cls): ++ """Adds implementation of private helper methods to cls.""" ++ ++ def Modified(self): ++ """Sets the _cached_byte_size_dirty bit to true, ++ and propagates this to our listener iff this was a state change. ++ """ ++ ++ # Note: Some callers check _cached_byte_size_dirty before calling ++ # _Modified() as an extra optimization. So, if this method is ever ++ # changed such that it does stuff even when _cached_byte_size_dirty is ++ # already true, the callers need to be updated. ++ if not self._cached_byte_size_dirty: ++ self._cached_byte_size_dirty = True ++ self._listener_for_children.dirty = True ++ self._is_present_in_parent = True ++ self._listener.Modified() ++ ++ def _UpdateOneofState(self, field): ++ """Sets field as the active field in its containing oneof. 
++ ++ Will also delete currently active field in the oneof, if it is different ++ from the argument. Does not mark the message as modified. ++ """ ++ other_field = self._oneofs.setdefault(field.containing_oneof, field) ++ if other_field is not field: ++ del self._fields[other_field] ++ self._oneofs[field.containing_oneof] = field ++ ++ cls._Modified = Modified ++ cls.SetInParent = Modified ++ cls._UpdateOneofState = _UpdateOneofState ++ ++ ++class _Listener(object): ++ ++ """MessageListener implementation that a parent message registers with its ++ child message. ++ ++ In order to support semantics like: ++ ++ foo.bar.baz.qux = 23 ++ assert foo.HasField('bar') ++ ++ ...child objects must have back references to their parents. ++ This helper class is at the heart of this support. ++ """ ++ ++ def __init__(self, parent_message): ++ """Args: ++ parent_message: The message whose _Modified() method we should call when ++ we receive Modified() messages. ++ """ ++ # This listener establishes a back reference from a child (contained) object ++ # to its parent (containing) object. We make this a weak reference to avoid ++ # creating cyclic garbage when the client finishes with the 'parent' object ++ # in the tree. ++ if isinstance(parent_message, weakref.ProxyType): ++ self._parent_message_weakref = parent_message ++ else: ++ self._parent_message_weakref = weakref.proxy(parent_message) ++ ++ # As an optimization, we also indicate directly on the listener whether ++ # or not the parent message is dirty. This way we can avoid traversing ++ # up the tree in the common case. ++ self.dirty = False ++ ++ def Modified(self): ++ if self.dirty: ++ return ++ try: ++ # Propagate the signal to our parents iff this is the first field set. ++ self._parent_message_weakref._Modified() ++ except ReferenceError: ++ # We can get here if a client has kept a reference to a child object, ++ # and is now setting a field on it, but the child's parent has been ++ # garbage-collected. 
This is not an error. ++ pass ++ ++ ++class _OneofListener(_Listener): ++ """Special listener implementation for setting composite oneof fields.""" ++ ++ def __init__(self, parent_message, field): ++ """Args: ++ parent_message: The message whose _Modified() method we should call when ++ we receive Modified() messages. ++ field: The descriptor of the field being set in the parent message. ++ """ ++ super(_OneofListener, self).__init__(parent_message) ++ self._field = field ++ ++ def Modified(self): ++ """Also updates the state of the containing oneof in the parent message.""" ++ try: ++ self._parent_message_weakref._UpdateOneofState(self._field) ++ super(_OneofListener, self).Modified() ++ except ReferenceError: ++ pass ++ ++ ++# TODO(robinson): Move elsewhere? This file is getting pretty ridiculous... ++# TODO(robinson): Unify error handling of "unknown extension" crap. ++# TODO(robinson): Support iteritems()-style iteration over all ++# extensions with the "has" bits turned on? ++class _ExtensionDict(object): ++ ++ """Dict-like container for supporting an indexable "Extensions" ++ field on proto instances. ++ ++ Note that in all cases we expect extension handles to be ++ FieldDescriptors. ++ """ ++ ++ def __init__(self, extended_message): ++ """extended_message: Message instance for which we are the Extensions dict. 
++ """ ++ ++ self._extended_message = extended_message ++ ++ def __getitem__(self, extension_handle): ++ """Returns the current value of the given extension handle.""" ++ ++ _VerifyExtensionHandle(self._extended_message, extension_handle) ++ ++ result = self._extended_message._fields.get(extension_handle) ++ if result is not None: ++ return result ++ ++ if extension_handle.label == _FieldDescriptor.LABEL_REPEATED: ++ result = extension_handle._default_constructor(self._extended_message) ++ elif extension_handle.cpp_type == _FieldDescriptor.CPPTYPE_MESSAGE: ++ result = extension_handle.message_type._concrete_class() ++ try: ++ result._SetListener(self._extended_message._listener_for_children) ++ except ReferenceError: ++ pass ++ else: ++ # Singular scalar -- just return the default without inserting into the ++ # dict. ++ return extension_handle.default_value ++ ++ # Atomically check if another thread has preempted us and, if not, swap ++ # in the new object we just created. If someone has preempted us, we ++ # take that object and discard ours. ++ # WARNING: We are relying on setdefault() being atomic. This is true ++ # in CPython but we haven't investigated others. This warning appears ++ # in several other locations in this file. ++ result = self._extended_message._fields.setdefault( ++ extension_handle, result) ++ ++ return result ++ ++ def __eq__(self, other): ++ if not isinstance(other, self.__class__): ++ return False ++ ++ my_fields = self._extended_message.ListFields() ++ other_fields = other._extended_message.ListFields() ++ ++ # Get rid of non-extension fields. ++ my_fields = [ field for field in my_fields if field.is_extension ] ++ other_fields = [ field for field in other_fields if field.is_extension ] ++ ++ return my_fields == other_fields ++ ++ def __ne__(self, other): ++ return not self == other ++ ++ def __hash__(self): ++ raise TypeError('unhashable object') ++ ++ # Note that this is only meaningful for non-repeated, scalar extension ++ # fields. 
++  # successfully set a field this way, to set any necessary "has" bits in the
++# * Redistributions in binary form must reproduce the above ++# copyright notice, this list of conditions and the following disclaimer ++# in the documentation and/or other materials provided with the ++# distribution. ++# * Neither the name of Google Inc. nor the names of its ++# contributors may be used to endorse or promote products derived from ++# this software without specific prior written permission. ++# ++# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS ++# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT ++# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR ++# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT ++# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, ++# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT ++# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, ++# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY ++# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT ++# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE ++# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. ++ ++"""Unittest for reflection.py, which also indirectly tests the output of the ++pure-Python protocol compiler. 
++""" ++ ++__author__ = 'robinson@google.com (Will Robinson)' ++ ++import copy ++import gc ++import operator ++import struct ++ ++import six ++ ++from google.apputils import basetest ++from google.protobuf import unittest_import_pb2 ++from google.protobuf import unittest_mset_pb2 ++from google.protobuf import unittest_pb2 ++from google.protobuf import descriptor_pb2 ++from google.protobuf import descriptor ++from google.protobuf import message ++from google.protobuf import reflection ++from google.protobuf import text_format ++from google.protobuf.internal import api_implementation ++from google.protobuf.internal import more_extensions_pb2 ++from google.protobuf.internal import more_messages_pb2 ++from google.protobuf.internal import wire_format ++from google.protobuf.internal import test_util ++from google.protobuf.internal import decoder ++ ++ ++class _MiniDecoder(object): ++ """Decodes a stream of values from a string. ++ ++ Once upon a time we actually had a class called decoder.Decoder. Then we ++ got rid of it during a redesign that made decoding much, much faster overall. ++ But a couple tests in this file used it to check that the serialized form of ++ a message was correct. So, this class implements just the methods that were ++ used by said tests, so that we don't have to rewrite the tests. 
++ """ ++ ++ def __init__(self, bytes): ++ self._bytes = bytes ++ self._pos = 0 ++ ++ def ReadVarint(self): ++ result, self._pos = decoder._DecodeVarint(self._bytes, self._pos) ++ return result ++ ++ ReadInt32 = ReadVarint ++ ReadInt64 = ReadVarint ++ ReadUInt32 = ReadVarint ++ ReadUInt64 = ReadVarint ++ ++ def ReadSInt64(self): ++ return wire_format.ZigZagDecode(self.ReadVarint()) ++ ++ ReadSInt32 = ReadSInt64 ++ ++ def ReadFieldNumberAndWireType(self): ++ return wire_format.UnpackTag(self.ReadVarint()) ++ ++ def ReadFloat(self): ++ result = struct.unpack(">> struct.unpack('f', struct.pack('f', 1.2))[0] ++ # 1.2000000476837158 ++ # >>> struct.unpack('f', struct.pack('f', 1.25))[0] ++ # 1.25 ++ message.payload.optional_float = 1.25 ++ # Check rounding at 15 significant digits ++ message.payload.optional_double = -.000003456789012345678 ++ # Check no decimal point. ++ message.payload.repeated_float.append(-5642) ++ # Check no trailing zeros. ++ message.payload.repeated_double.append(.000078900) ++ formatted_fields = ['optional_float: 1.25', ++ 'optional_double: -3.45678901234568e-6', ++ 'repeated_float: -5642', ++ 'repeated_double: 7.89e-5'] ++ text_message = text_format.MessageToString(message, float_format='.15g') ++ self.CompareToGoldenText( ++ self.RemoveRedundantZeros(text_message), ++ 'payload {{\n {}\n {}\n {}\n {}\n}}\n'.format(*formatted_fields)) ++ # as_one_line=True is a separate code branch where float_format is passed. ++ text_message = text_format.MessageToString(message, as_one_line=True, ++ float_format='.15g') ++ self.CompareToGoldenText( ++ self.RemoveRedundantZeros(text_message), ++ 'payload {{ {} {} {} {} }}'.format(*formatted_fields)) ++ ++ def testMessageToString(self): ++ message = unittest_pb2.ForeignMessage() ++ message.c = 123 ++ self.assertEqual('c: 123\n', str(message)) ++ ++ def RemoveRedundantZeros(self, text): ++ # Some platforms print 1e+5 as 1e+005. 
++    # actually integer numbers.
message.repeated_uint64[0]) ++ self.assertEqual(2, message.repeated_uint64[1]) ++ ++ message = unittest_mset_pb2.TestMessageSetContainer() ++ text = ('message_set {\n' ++ ' [protobuf_unittest.TestMessageSetExtension1] {\n' ++ ' i: 23\n' ++ ' }\n' ++ ' [protobuf_unittest.TestMessageSetExtension2] {\n' ++ ' str: \"foo\"\n' ++ ' }\n' ++ '}\n') ++ text_format.Parse(text, message) ++ ext1 = unittest_mset_pb2.TestMessageSetExtension1.message_set_extension ++ ext2 = unittest_mset_pb2.TestMessageSetExtension2.message_set_extension ++ self.assertEquals(23, message.message_set.Extensions[ext1].i) ++ self.assertEquals('foo', message.message_set.Extensions[ext2].str) ++ ++ def testParseExotic(self): ++ message = unittest_pb2.TestAllTypes() ++ text = ('repeated_int64: -9223372036854775808\n' ++ 'repeated_uint64: 18446744073709551615\n' ++ 'repeated_double: 123.456\n' ++ 'repeated_double: 1.23e+22\n' ++ 'repeated_double: 1.23e-18\n' ++ 'repeated_string: \n' ++ '"\\000\\001\\007\\010\\014\\n\\r\\t\\013\\\\\\\'\\""\n' ++ 'repeated_string: "foo" \'corge\' "grault"\n' ++ 'repeated_string: "\\303\\274\\352\\234\\237"\n' ++ 'repeated_string: "\\xc3\\xbc"\n' ++ 'repeated_string: "\xc3\xbc"\n') ++ text_format.Parse(text, message) ++ ++ self.assertEqual(-9223372036854775808, message.repeated_int64[0]) ++ self.assertEqual(18446744073709551615, message.repeated_uint64[0]) ++ self.assertEqual(123.456, message.repeated_double[0]) ++ self.assertEqual(1.23e22, message.repeated_double[1]) ++ self.assertEqual(1.23e-18, message.repeated_double[2]) ++ self.assertEqual( ++ '\000\001\a\b\f\n\r\t\v\\\'"', message.repeated_string[0]) ++ self.assertEqual('foocorgegrault', message.repeated_string[1]) ++ self.assertEqual(u'\u00fc\ua71f', message.repeated_string[2]) ++ self.assertEqual(u'\u00fc', message.repeated_string[3]) ++ ++ def testParseTrailingCommas(self): ++ message = unittest_pb2.TestAllTypes() ++ text = ('repeated_int64: 100;\n' ++ 'repeated_int64: 200;\n' ++ 'repeated_int64: 300,\n' ++ 
'repeated_string: "one",\n' ++ 'repeated_string: "two";\n') ++ text_format.Parse(text, message) ++ ++ self.assertEqual(100, message.repeated_int64[0]) ++ self.assertEqual(200, message.repeated_int64[1]) ++ self.assertEqual(300, message.repeated_int64[2]) ++ self.assertEqual(u'one', message.repeated_string[0]) ++ self.assertEqual(u'two', message.repeated_string[1]) ++ ++ def testParseEmptyText(self): ++ message = unittest_pb2.TestAllTypes() ++ text = '' ++ text_format.Parse(text, message) ++ self.assertEquals(unittest_pb2.TestAllTypes(), message) ++ ++ def testParseInvalidUtf8(self): ++ message = unittest_pb2.TestAllTypes() ++ text = 'repeated_string: "\\xc3\\xc3"' ++ self.assertRaises(text_format.ParseError, text_format.Parse, text, message) ++ ++ def testParseSingleWord(self): ++ message = unittest_pb2.TestAllTypes() ++ text = 'foo' ++ self.assertRaisesWithLiteralMatch( ++ text_format.ParseError, ++ ('1:1 : Message type "protobuf_unittest.TestAllTypes" has no field named ' ++ '"foo".'), ++ text_format.Parse, text, message) ++ ++ def testParseUnknownField(self): ++ message = unittest_pb2.TestAllTypes() ++ text = 'unknown_field: 8\n' ++ self.assertRaisesWithLiteralMatch( ++ text_format.ParseError, ++ ('1:1 : Message type "protobuf_unittest.TestAllTypes" has no field named ' ++ '"unknown_field".'), ++ text_format.Parse, text, message) ++ ++ def testParseBadExtension(self): ++ message = unittest_pb2.TestAllExtensions() ++ text = '[unknown_extension]: 8\n' ++ self.assertRaisesWithLiteralMatch( ++ text_format.ParseError, ++ '1:2 : Extension "unknown_extension" not registered.', ++ text_format.Parse, text, message) ++ message = unittest_pb2.TestAllTypes() ++ self.assertRaisesWithLiteralMatch( ++ text_format.ParseError, ++ ('1:2 : Message type "protobuf_unittest.TestAllTypes" does not have ' ++ 'extensions.'), ++ text_format.Parse, text, message) ++ ++ def testParseGroupNotClosed(self): ++ message = unittest_pb2.TestAllTypes() ++ text = 'RepeatedGroup: <' ++ 
self.assertRaisesWithLiteralMatch( ++ text_format.ParseError, '1:16 : Expected ">".', ++ text_format.Parse, text, message) ++ ++ text = 'RepeatedGroup: {' ++ self.assertRaisesWithLiteralMatch( ++ text_format.ParseError, '1:16 : Expected "}".', ++ text_format.Parse, text, message) ++ ++ def testParseEmptyGroup(self): ++ message = unittest_pb2.TestAllTypes() ++ text = 'OptionalGroup: {}' ++ text_format.Parse(text, message) ++ self.assertTrue(message.HasField('optionalgroup')) ++ ++ message.Clear() ++ ++ message = unittest_pb2.TestAllTypes() ++ text = 'OptionalGroup: <>' ++ text_format.Parse(text, message) ++ self.assertTrue(message.HasField('optionalgroup')) ++ ++ def testParseBadEnumValue(self): ++ message = unittest_pb2.TestAllTypes() ++ text = 'optional_nested_enum: BARR' ++ self.assertRaisesWithLiteralMatch( ++ text_format.ParseError, ++ ('1:23 : Enum type "protobuf_unittest.TestAllTypes.NestedEnum" ' ++ 'has no value named BARR.'), ++ text_format.Parse, text, message) ++ ++ message = unittest_pb2.TestAllTypes() ++ text = 'optional_nested_enum: 100' ++ self.assertRaisesWithLiteralMatch( ++ text_format.ParseError, ++ ('1:23 : Enum type "protobuf_unittest.TestAllTypes.NestedEnum" ' ++ 'has no value with number 100.'), ++ text_format.Parse, text, message) ++ ++ def testParseBadIntValue(self): ++ message = unittest_pb2.TestAllTypes() ++ text = 'optional_int32: bork' ++ self.assertRaisesWithLiteralMatch( ++ text_format.ParseError, ++ ('1:17 : Couldn\'t parse integer: bork'), ++ text_format.Parse, text, message) ++ ++ def testParseStringFieldUnescape(self): ++ message = unittest_pb2.TestAllTypes() ++ text = r'''repeated_string: "\xf\x62" ++ repeated_string: "\\xf\\x62" ++ repeated_string: "\\\xf\\\x62" ++ repeated_string: "\\\\xf\\\\x62" ++ repeated_string: "\\\\\xf\\\\\x62" ++ repeated_string: "\x5cx20"''' ++ text_format.Parse(text, message) ++ ++ SLASH = '\\' ++ self.assertEqual('\x0fb', message.repeated_string[0]) ++ self.assertEqual(SLASH + 'xf' + SLASH + 'x62', 
message.repeated_string[1]) ++ self.assertEqual(SLASH + '\x0f' + SLASH + 'b', message.repeated_string[2]) ++ self.assertEqual(SLASH + SLASH + 'xf' + SLASH + SLASH + 'x62', ++ message.repeated_string[3]) ++ self.assertEqual(SLASH + SLASH + '\x0f' + SLASH + SLASH + 'b', ++ message.repeated_string[4]) ++ self.assertEqual(SLASH + 'x20', message.repeated_string[5]) ++ ++ def testMergeRepeatedScalars(self): ++ message = unittest_pb2.TestAllTypes() ++ text = ('optional_int32: 42 ' ++ 'optional_int32: 67') ++ r = text_format.Merge(text, message) ++ self.assertIs(r, message) ++ self.assertEqual(67, message.optional_int32) ++ ++ def testParseRepeatedScalars(self): ++ message = unittest_pb2.TestAllTypes() ++ text = ('optional_int32: 42 ' ++ 'optional_int32: 67') ++ self.assertRaisesWithLiteralMatch( ++ text_format.ParseError, ++ ('1:36 : Message type "protobuf_unittest.TestAllTypes" should not ' ++ 'have multiple "optional_int32" fields.'), ++ text_format.Parse, text, message) ++ ++ def testMergeRepeatedNestedMessageScalars(self): ++ message = unittest_pb2.TestAllTypes() ++ text = ('optional_nested_message { bb: 1 } ' ++ 'optional_nested_message { bb: 2 }') ++ r = text_format.Merge(text, message) ++ self.assertTrue(r is message) ++ self.assertEqual(2, message.optional_nested_message.bb) ++ ++ def testParseRepeatedNestedMessageScalars(self): ++ message = unittest_pb2.TestAllTypes() ++ text = ('optional_nested_message { bb: 1 } ' ++ 'optional_nested_message { bb: 2 }') ++ self.assertRaisesWithLiteralMatch( ++ text_format.ParseError, ++ ('1:65 : Message type "protobuf_unittest.TestAllTypes.NestedMessage" ' ++ 'should not have multiple "bb" fields.'), ++ text_format.Parse, text, message) ++ ++ def testMergeRepeatedExtensionScalars(self): ++ message = unittest_pb2.TestAllExtensions() ++ text = ('[protobuf_unittest.optional_int32_extension]: 42 ' ++ '[protobuf_unittest.optional_int32_extension]: 67') ++ text_format.Merge(text, message) ++ self.assertEqual( ++ 67, ++ 
message.Extensions[unittest_pb2.optional_int32_extension]) ++ ++ def testParseRepeatedExtensionScalars(self): ++ message = unittest_pb2.TestAllExtensions() ++ text = ('[protobuf_unittest.optional_int32_extension]: 42 ' ++ '[protobuf_unittest.optional_int32_extension]: 67') ++ self.assertRaisesWithLiteralMatch( ++ text_format.ParseError, ++ ('1:96 : Message type "protobuf_unittest.TestAllExtensions" ' ++ 'should not have multiple ' ++ '"protobuf_unittest.optional_int32_extension" extensions.'), ++ text_format.Parse, text, message) ++ ++ def testParseLinesGolden(self): ++ opened = self.ReadGolden('text_format_unittest_data.txt') ++ parsed_message = unittest_pb2.TestAllTypes() ++ r = text_format.ParseLines(opened, parsed_message) ++ self.assertIs(r, parsed_message) ++ ++ message = unittest_pb2.TestAllTypes() ++ test_util.SetAllFields(message) ++ self.assertEquals(message, parsed_message) ++ ++ def testMergeLinesGolden(self): ++ opened = self.ReadGolden('text_format_unittest_data.txt') ++ parsed_message = unittest_pb2.TestAllTypes() ++ r = text_format.MergeLines(opened, parsed_message) ++ self.assertIs(r, parsed_message) ++ ++ message = unittest_pb2.TestAllTypes() ++ test_util.SetAllFields(message) ++ self.assertEqual(message, parsed_message) ++ ++ def testParseOneof(self): ++ m = unittest_pb2.TestAllTypes() ++ m.oneof_uint32 = 11 ++ m2 = unittest_pb2.TestAllTypes() ++ text_format.Parse(text_format.MessageToString(m), m2) ++ self.assertEqual('oneof_uint32', m2.WhichOneof('oneof_field')) ++ ++ ++class TokenizerTest(basetest.TestCase): ++ ++ def testSimpleTokenCases(self): ++ text = ('identifier1:"string1"\n \n\n' ++ 'identifier2 : \n \n123 \n identifier3 :\'string\'\n' ++ 'identifiER_4 : 1.1e+2 ID5:-0.23 ID6:\'aaaa\\\'bbbb\'\n' ++ 'ID7 : "aa\\"bb"\n\n\n\n ID8: {A:inf B:-inf C:true D:false}\n' ++ 'ID9: 22 ID10: -111111111111111111 ID11: -22\n' ++ 'ID12: 2222222222222222222 ID13: 1.23456f ID14: 1.2e+2f ' ++ 'false_bool: 0 true_BOOL:t \n true_bool1: 1 false_BOOL1:f ') ++ 
tokenizer = text_format._Tokenizer(text.splitlines()) ++ methods = [(tokenizer.ConsumeIdentifier, 'identifier1'), ++ ':', ++ (tokenizer.ConsumeString, 'string1'), ++ (tokenizer.ConsumeIdentifier, 'identifier2'), ++ ':', ++ (tokenizer.ConsumeInt32, 123), ++ (tokenizer.ConsumeIdentifier, 'identifier3'), ++ ':', ++ (tokenizer.ConsumeString, 'string'), ++ (tokenizer.ConsumeIdentifier, 'identifiER_4'), ++ ':', ++ (tokenizer.ConsumeFloat, 1.1e+2), ++ (tokenizer.ConsumeIdentifier, 'ID5'), ++ ':', ++ (tokenizer.ConsumeFloat, -0.23), ++ (tokenizer.ConsumeIdentifier, 'ID6'), ++ ':', ++ (tokenizer.ConsumeString, 'aaaa\'bbbb'), ++ (tokenizer.ConsumeIdentifier, 'ID7'), ++ ':', ++ (tokenizer.ConsumeString, 'aa\"bb'), ++ (tokenizer.ConsumeIdentifier, 'ID8'), ++ ':', ++ '{', ++ (tokenizer.ConsumeIdentifier, 'A'), ++ ':', ++ (tokenizer.ConsumeFloat, float('inf')), ++ (tokenizer.ConsumeIdentifier, 'B'), ++ ':', ++ (tokenizer.ConsumeFloat, -float('inf')), ++ (tokenizer.ConsumeIdentifier, 'C'), ++ ':', ++ (tokenizer.ConsumeBool, True), ++ (tokenizer.ConsumeIdentifier, 'D'), ++ ':', ++ (tokenizer.ConsumeBool, False), ++ '}', ++ (tokenizer.ConsumeIdentifier, 'ID9'), ++ ':', ++ (tokenizer.ConsumeUint32, 22), ++ (tokenizer.ConsumeIdentifier, 'ID10'), ++ ':', ++ (tokenizer.ConsumeInt64, -111111111111111111), ++ (tokenizer.ConsumeIdentifier, 'ID11'), ++ ':', ++ (tokenizer.ConsumeInt32, -22), ++ (tokenizer.ConsumeIdentifier, 'ID12'), ++ ':', ++ (tokenizer.ConsumeUint64, 2222222222222222222), ++ (tokenizer.ConsumeIdentifier, 'ID13'), ++ ':', ++ (tokenizer.ConsumeFloat, 1.23456), ++ (tokenizer.ConsumeIdentifier, 'ID14'), ++ ':', ++ (tokenizer.ConsumeFloat, 1.2e+2), ++ (tokenizer.ConsumeIdentifier, 'false_bool'), ++ ':', ++ (tokenizer.ConsumeBool, False), ++ (tokenizer.ConsumeIdentifier, 'true_BOOL'), ++ ':', ++ (tokenizer.ConsumeBool, True), ++ (tokenizer.ConsumeIdentifier, 'true_bool1'), ++ ':', ++ (tokenizer.ConsumeBool, True), ++ (tokenizer.ConsumeIdentifier, 'false_BOOL1'), ++ ':', ++ 
(tokenizer.ConsumeBool, False)] ++ ++ i = 0 ++ while not tokenizer.AtEnd(): ++ m = methods[i] ++ if type(m) == str: ++ token = tokenizer.token ++ self.assertEqual(token, m) ++ tokenizer.NextToken() ++ else: ++ self.assertEqual(m[1], m[0]()) ++ i += 1 ++ ++ def testConsumeIntegers(self): ++ # This test only tests the failures in the integer parsing methods as well ++ # as the '0' special cases. ++ int64_max = (1 << 63) - 1 ++ uint32_max = (1 << 32) - 1 ++ text = '-1 %d %d' % (uint32_max + 1, int64_max + 1) ++ tokenizer = text_format._Tokenizer(text.splitlines()) ++ self.assertRaises(text_format.ParseError, tokenizer.ConsumeUint32) ++ self.assertRaises(text_format.ParseError, tokenizer.ConsumeUint64) ++ self.assertEqual(-1, tokenizer.ConsumeInt32()) ++ ++ self.assertRaises(text_format.ParseError, tokenizer.ConsumeUint32) ++ self.assertRaises(text_format.ParseError, tokenizer.ConsumeInt32) ++ self.assertEqual(uint32_max + 1, tokenizer.ConsumeInt64()) ++ ++ self.assertRaises(text_format.ParseError, tokenizer.ConsumeInt64) ++ self.assertEqual(int64_max + 1, tokenizer.ConsumeUint64()) ++ self.assertTrue(tokenizer.AtEnd()) ++ ++ text = '-0 -0 0 0' ++ tokenizer = text_format._Tokenizer(text.splitlines()) ++ self.assertEqual(0, tokenizer.ConsumeUint32()) ++ self.assertEqual(0, tokenizer.ConsumeUint64()) ++ self.assertEqual(0, tokenizer.ConsumeUint32()) ++ self.assertEqual(0, tokenizer.ConsumeUint64()) ++ self.assertTrue(tokenizer.AtEnd()) ++ ++ def testConsumeByteString(self): ++ text = '"string1\'' ++ tokenizer = text_format._Tokenizer(text.splitlines()) ++ self.assertRaises(text_format.ParseError, tokenizer.ConsumeByteString) ++ ++ text = 'string1"' ++ tokenizer = text_format._Tokenizer(text.splitlines()) ++ self.assertRaises(text_format.ParseError, tokenizer.ConsumeByteString) ++ ++ text = '\n"\\xt"' ++ tokenizer = text_format._Tokenizer(text.splitlines()) ++ self.assertRaises(text_format.ParseError, tokenizer.ConsumeByteString) ++ ++ text = '\n"\\"' ++ tokenizer = 
text_format._Tokenizer(text.splitlines()) ++ self.assertRaises(text_format.ParseError, tokenizer.ConsumeByteString) ++ ++ text = '\n"\\x"' ++ tokenizer = text_format._Tokenizer(text.splitlines()) ++ self.assertRaises(text_format.ParseError, tokenizer.ConsumeByteString) ++ ++ def testConsumeBool(self): ++ text = 'not-a-bool' ++ tokenizer = text_format._Tokenizer(text.splitlines()) ++ self.assertRaises(text_format.ParseError, tokenizer.ConsumeBool) ++ ++ ++if __name__ == '__main__': ++ basetest.main() +--- /dev/null ++++ protobuf-2.6.1/python3/google/protobuf/internal/type_checkers.py +@@ -0,0 +1,328 @@ ++# Protocol Buffers - Google's data interchange format ++# Copyright 2008 Google Inc. All rights reserved. ++# https://developers.google.com/protocol-buffers/ ++# ++# Redistribution and use in source and binary forms, with or without ++# modification, are permitted provided that the following conditions are ++# met: ++# ++# * Redistributions of source code must retain the above copyright ++# notice, this list of conditions and the following disclaimer. ++# * Redistributions in binary form must reproduce the above ++# copyright notice, this list of conditions and the following disclaimer ++# in the documentation and/or other materials provided with the ++# distribution. ++# * Neither the name of Google Inc. nor the names of its ++# contributors may be used to endorse or promote products derived from ++# this software without specific prior written permission. ++# ++# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS ++# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT ++# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR ++# A PARTICULAR PURPOSE ARE DISCLAIMED. 
++FIELD_TYPE_TO_WIRE_TYPE: A dictionary with field types and their
++  corresponding wire types.
++ """ ++ if (field.cpp_type == _FieldDescriptor.CPPTYPE_STRING and ++ field.type == _FieldDescriptor.TYPE_STRING): ++ return UnicodeValueChecker() ++ if field.cpp_type == _FieldDescriptor.CPPTYPE_ENUM: ++ return EnumValueChecker(field.enum_type) ++ return _VALUE_CHECKERS[field.cpp_type] ++ ++ ++# None of the typecheckers below make any attempt to guard against people ++# subclassing builtin types and doing weird things. We're not trying to ++# protect against malicious clients here, just people accidentally shooting ++# themselves in the foot in obvious ways. ++ ++class TypeChecker(object): ++ ++ """Type checker used to catch type errors as early as possible ++ when the client is setting scalar fields in protocol messages. ++ """ ++ ++ def __init__(self, *acceptable_types): ++ self._acceptable_types = acceptable_types ++ ++ def CheckValue(self, proposed_value): ++ """Type check the provided value and return it. ++ ++ The returned value might have been normalized to another type. ++ """ ++ if not isinstance(proposed_value, self._acceptable_types): ++ message = ('%.1024r has type %s, but expected one of: %s' % ++ (proposed_value, type(proposed_value), self._acceptable_types)) ++ raise TypeError(message) ++ return proposed_value ++ ++ ++# IntValueChecker and its subclasses perform integer type-checks ++# and bounds-checks. ++class IntValueChecker(object): ++ ++ """Checker used for integer fields. Performs type-check and range check.""" ++ ++ def CheckValue(self, proposed_value): ++ if not isinstance(proposed_value, six.integer_types): ++ message = ('%.1024r has type %s, but expected one of: %s' % ++ (proposed_value, type(proposed_value), six.integer_types)) ++ raise TypeError(message) ++ if not self._MIN <= proposed_value <= self._MAX: ++ raise ValueError('Value out of range: %d' % proposed_value) ++ # We force 32-bit values to int and 64-bit values to long to make ++ # alternate implementations where the distinction is more significant ++ # (e.g. 
the C++ implementation) simpler. ++ proposed_value = self._TYPE(proposed_value) ++ return proposed_value ++ ++ ++class EnumValueChecker(object): ++ ++ """Checker used for enum fields. Performs type-check and range check.""" ++ ++ def __init__(self, enum_type): ++ self._enum_type = enum_type ++ ++ def CheckValue(self, proposed_value): ++ if not isinstance(proposed_value, six.integer_types): ++ message = ('%.1024r has type %s, but expected one of: %s' % ++ (proposed_value, type(proposed_value), six.integer_types)) ++ raise TypeError(message) ++ if proposed_value not in self._enum_type.values_by_number: ++ raise ValueError('Unknown enum value: %d' % proposed_value) ++ return proposed_value ++ ++ ++class UnicodeValueChecker(object): ++ ++ """Checker used for string fields. ++ ++ Always returns a unicode value, even if the input is of type str. ++ """ ++ ++ def CheckValue(self, proposed_value): ++ if not isinstance(proposed_value, (bytes, six.text_type)): ++ message = ('%.1024r has type %s, but expected one of: %s' % ++ (proposed_value, type(proposed_value), (bytes, six.text_type))) ++ raise TypeError(message) ++ ++ # If the value is of type 'bytes' make sure that it is in 7-bit ASCII ++ # encoding. ++ if isinstance(proposed_value, bytes): ++ try: ++ proposed_value = proposed_value.decode('ascii') ++ except UnicodeDecodeError: ++ raise ValueError('%.1024r has type bytes, but isn\'t in 7-bit ASCII ' ++ 'encoding. Non-ASCII strings must be converted to ' ++ 'unicode objects before being added.' % ++ (proposed_value)) ++ return proposed_value ++ ++ ++class Int32ValueChecker(IntValueChecker): ++ # We're sure to use ints instead of longs here since comparison may be more ++ # efficient. 
++ _MIN = -2147483648 ++ _MAX = 2147483647 ++ _TYPE = int ++ ++ ++class Uint32ValueChecker(IntValueChecker): ++ _MIN = 0 ++ _MAX = (1 << 32) - 1 ++ _TYPE = int ++ ++ ++class Int64ValueChecker(IntValueChecker): ++ _MIN = -(1 << 63) ++ _MAX = (1 << 63) - 1 ++ _TYPE = long ++ ++ ++class Uint64ValueChecker(IntValueChecker): ++ _MIN = 0 ++ _MAX = (1 << 64) - 1 ++ _TYPE = long ++ ++ ++# Type-checkers for all scalar CPPTYPEs. ++_VALUE_CHECKERS = { ++ _FieldDescriptor.CPPTYPE_INT32: Int32ValueChecker(), ++ _FieldDescriptor.CPPTYPE_INT64: Int64ValueChecker(), ++ _FieldDescriptor.CPPTYPE_UINT32: Uint32ValueChecker(), ++ _FieldDescriptor.CPPTYPE_UINT64: Uint64ValueChecker(), ++ _FieldDescriptor.CPPTYPE_DOUBLE: TypeChecker( ++ float, int, long), ++ _FieldDescriptor.CPPTYPE_FLOAT: TypeChecker( ++ float, int, long), ++ _FieldDescriptor.CPPTYPE_BOOL: TypeChecker(bool, int), ++ _FieldDescriptor.CPPTYPE_STRING: TypeChecker(bytes), ++ } ++ ++ ++# Map from field type to a function F, such that F(field_num, value) ++# gives the total byte size for a value of the given type. This ++# byte size includes tag information and any other additional space ++# associated with serializing "value". 
++TYPE_TO_BYTE_SIZE_FN = { ++ _FieldDescriptor.TYPE_DOUBLE: wire_format.DoubleByteSize, ++ _FieldDescriptor.TYPE_FLOAT: wire_format.FloatByteSize, ++ _FieldDescriptor.TYPE_INT64: wire_format.Int64ByteSize, ++ _FieldDescriptor.TYPE_UINT64: wire_format.UInt64ByteSize, ++ _FieldDescriptor.TYPE_INT32: wire_format.Int32ByteSize, ++ _FieldDescriptor.TYPE_FIXED64: wire_format.Fixed64ByteSize, ++ _FieldDescriptor.TYPE_FIXED32: wire_format.Fixed32ByteSize, ++ _FieldDescriptor.TYPE_BOOL: wire_format.BoolByteSize, ++ _FieldDescriptor.TYPE_STRING: wire_format.StringByteSize, ++ _FieldDescriptor.TYPE_GROUP: wire_format.GroupByteSize, ++ _FieldDescriptor.TYPE_MESSAGE: wire_format.MessageByteSize, ++ _FieldDescriptor.TYPE_BYTES: wire_format.BytesByteSize, ++ _FieldDescriptor.TYPE_UINT32: wire_format.UInt32ByteSize, ++ _FieldDescriptor.TYPE_ENUM: wire_format.EnumByteSize, ++ _FieldDescriptor.TYPE_SFIXED32: wire_format.SFixed32ByteSize, ++ _FieldDescriptor.TYPE_SFIXED64: wire_format.SFixed64ByteSize, ++ _FieldDescriptor.TYPE_SINT32: wire_format.SInt32ByteSize, ++ _FieldDescriptor.TYPE_SINT64: wire_format.SInt64ByteSize ++ } ++ ++ ++# Maps from field types to encoder constructors. 
++TYPE_TO_ENCODER = { ++ _FieldDescriptor.TYPE_DOUBLE: encoder.DoubleEncoder, ++ _FieldDescriptor.TYPE_FLOAT: encoder.FloatEncoder, ++ _FieldDescriptor.TYPE_INT64: encoder.Int64Encoder, ++ _FieldDescriptor.TYPE_UINT64: encoder.UInt64Encoder, ++ _FieldDescriptor.TYPE_INT32: encoder.Int32Encoder, ++ _FieldDescriptor.TYPE_FIXED64: encoder.Fixed64Encoder, ++ _FieldDescriptor.TYPE_FIXED32: encoder.Fixed32Encoder, ++ _FieldDescriptor.TYPE_BOOL: encoder.BoolEncoder, ++ _FieldDescriptor.TYPE_STRING: encoder.StringEncoder, ++ _FieldDescriptor.TYPE_GROUP: encoder.GroupEncoder, ++ _FieldDescriptor.TYPE_MESSAGE: encoder.MessageEncoder, ++ _FieldDescriptor.TYPE_BYTES: encoder.BytesEncoder, ++ _FieldDescriptor.TYPE_UINT32: encoder.UInt32Encoder, ++ _FieldDescriptor.TYPE_ENUM: encoder.EnumEncoder, ++ _FieldDescriptor.TYPE_SFIXED32: encoder.SFixed32Encoder, ++ _FieldDescriptor.TYPE_SFIXED64: encoder.SFixed64Encoder, ++ _FieldDescriptor.TYPE_SINT32: encoder.SInt32Encoder, ++ _FieldDescriptor.TYPE_SINT64: encoder.SInt64Encoder, ++ } ++ ++ ++# Maps from field types to sizer constructors. 
++TYPE_TO_SIZER = { ++ _FieldDescriptor.TYPE_DOUBLE: encoder.DoubleSizer, ++ _FieldDescriptor.TYPE_FLOAT: encoder.FloatSizer, ++ _FieldDescriptor.TYPE_INT64: encoder.Int64Sizer, ++ _FieldDescriptor.TYPE_UINT64: encoder.UInt64Sizer, ++ _FieldDescriptor.TYPE_INT32: encoder.Int32Sizer, ++ _FieldDescriptor.TYPE_FIXED64: encoder.Fixed64Sizer, ++ _FieldDescriptor.TYPE_FIXED32: encoder.Fixed32Sizer, ++ _FieldDescriptor.TYPE_BOOL: encoder.BoolSizer, ++ _FieldDescriptor.TYPE_STRING: encoder.StringSizer, ++ _FieldDescriptor.TYPE_GROUP: encoder.GroupSizer, ++ _FieldDescriptor.TYPE_MESSAGE: encoder.MessageSizer, ++ _FieldDescriptor.TYPE_BYTES: encoder.BytesSizer, ++ _FieldDescriptor.TYPE_UINT32: encoder.UInt32Sizer, ++ _FieldDescriptor.TYPE_ENUM: encoder.EnumSizer, ++ _FieldDescriptor.TYPE_SFIXED32: encoder.SFixed32Sizer, ++ _FieldDescriptor.TYPE_SFIXED64: encoder.SFixed64Sizer, ++ _FieldDescriptor.TYPE_SINT32: encoder.SInt32Sizer, ++ _FieldDescriptor.TYPE_SINT64: encoder.SInt64Sizer, ++ } ++ ++ ++# Maps from field type to a decoder constructor. 
++TYPE_TO_DECODER = { ++ _FieldDescriptor.TYPE_DOUBLE: decoder.DoubleDecoder, ++ _FieldDescriptor.TYPE_FLOAT: decoder.FloatDecoder, ++ _FieldDescriptor.TYPE_INT64: decoder.Int64Decoder, ++ _FieldDescriptor.TYPE_UINT64: decoder.UInt64Decoder, ++ _FieldDescriptor.TYPE_INT32: decoder.Int32Decoder, ++ _FieldDescriptor.TYPE_FIXED64: decoder.Fixed64Decoder, ++ _FieldDescriptor.TYPE_FIXED32: decoder.Fixed32Decoder, ++ _FieldDescriptor.TYPE_BOOL: decoder.BoolDecoder, ++ _FieldDescriptor.TYPE_STRING: decoder.StringDecoder, ++ _FieldDescriptor.TYPE_GROUP: decoder.GroupDecoder, ++ _FieldDescriptor.TYPE_MESSAGE: decoder.MessageDecoder, ++ _FieldDescriptor.TYPE_BYTES: decoder.BytesDecoder, ++ _FieldDescriptor.TYPE_UINT32: decoder.UInt32Decoder, ++ _FieldDescriptor.TYPE_ENUM: decoder.EnumDecoder, ++ _FieldDescriptor.TYPE_SFIXED32: decoder.SFixed32Decoder, ++ _FieldDescriptor.TYPE_SFIXED64: decoder.SFixed64Decoder, ++ _FieldDescriptor.TYPE_SINT32: decoder.SInt32Decoder, ++ _FieldDescriptor.TYPE_SINT64: decoder.SInt64Decoder, ++ } ++ ++# Maps from field type to expected wiretype. 
++FIELD_TYPE_TO_WIRE_TYPE = { ++ _FieldDescriptor.TYPE_DOUBLE: wire_format.WIRETYPE_FIXED64, ++ _FieldDescriptor.TYPE_FLOAT: wire_format.WIRETYPE_FIXED32, ++ _FieldDescriptor.TYPE_INT64: wire_format.WIRETYPE_VARINT, ++ _FieldDescriptor.TYPE_UINT64: wire_format.WIRETYPE_VARINT, ++ _FieldDescriptor.TYPE_INT32: wire_format.WIRETYPE_VARINT, ++ _FieldDescriptor.TYPE_FIXED64: wire_format.WIRETYPE_FIXED64, ++ _FieldDescriptor.TYPE_FIXED32: wire_format.WIRETYPE_FIXED32, ++ _FieldDescriptor.TYPE_BOOL: wire_format.WIRETYPE_VARINT, ++ _FieldDescriptor.TYPE_STRING: ++ wire_format.WIRETYPE_LENGTH_DELIMITED, ++ _FieldDescriptor.TYPE_GROUP: wire_format.WIRETYPE_START_GROUP, ++ _FieldDescriptor.TYPE_MESSAGE: ++ wire_format.WIRETYPE_LENGTH_DELIMITED, ++ _FieldDescriptor.TYPE_BYTES: ++ wire_format.WIRETYPE_LENGTH_DELIMITED, ++ _FieldDescriptor.TYPE_UINT32: wire_format.WIRETYPE_VARINT, ++ _FieldDescriptor.TYPE_ENUM: wire_format.WIRETYPE_VARINT, ++ _FieldDescriptor.TYPE_SFIXED32: wire_format.WIRETYPE_FIXED32, ++ _FieldDescriptor.TYPE_SFIXED64: wire_format.WIRETYPE_FIXED64, ++ _FieldDescriptor.TYPE_SINT32: wire_format.WIRETYPE_VARINT, ++ _FieldDescriptor.TYPE_SINT64: wire_format.WIRETYPE_VARINT, ++ } +--- /dev/null ++++ protobuf-2.6.1/python3/google/protobuf/internal/unknown_fields_test.py +@@ -0,0 +1,231 @@ ++#! /usr/bin/python ++# -*- coding: utf-8 -*- ++# ++# Protocol Buffers - Google's data interchange format ++# Copyright 2008 Google Inc. All rights reserved. ++# https://developers.google.com/protocol-buffers/ ++# ++# Redistribution and use in source and binary forms, with or without ++# modification, are permitted provided that the following conditions are ++# met: ++# ++# * Redistributions of source code must retain the above copyright ++# notice, this list of conditions and the following disclaimer. 
++# * Redistributions in binary form must reproduce the above ++# copyright notice, this list of conditions and the following disclaimer ++# in the documentation and/or other materials provided with the ++# distribution. ++# * Neither the name of Google Inc. nor the names of its ++# contributors may be used to endorse or promote products derived from ++# this software without specific prior written permission. ++# ++# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS ++# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT ++# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR ++# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT ++# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, ++# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT ++# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, ++# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY ++# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT ++# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE ++# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
++ ++"""Test for preservation of unknown fields in the pure Python implementation.""" ++ ++__author__ = 'bohdank@google.com (Bohdan Koval)' ++ ++from google.apputils import basetest ++from google.protobuf import unittest_mset_pb2 ++from google.protobuf import unittest_pb2 ++from google.protobuf.internal import encoder ++from google.protobuf.internal import missing_enum_values_pb2 ++from google.protobuf.internal import test_util ++from google.protobuf.internal import type_checkers ++ ++ ++class UnknownFieldsTest(basetest.TestCase): ++ ++ def setUp(self): ++ self.descriptor = unittest_pb2.TestAllTypes.DESCRIPTOR ++ self.all_fields = unittest_pb2.TestAllTypes() ++ test_util.SetAllFields(self.all_fields) ++ self.all_fields_data = self.all_fields.SerializeToString() ++ self.empty_message = unittest_pb2.TestEmptyMessage() ++ self.empty_message.ParseFromString(self.all_fields_data) ++ self.unknown_fields = self.empty_message._unknown_fields ++ ++ def GetField(self, name): ++ field_descriptor = self.descriptor.fields_by_name[name] ++ wire_type = type_checkers.FIELD_TYPE_TO_WIRE_TYPE[field_descriptor.type] ++ field_tag = encoder.TagBytes(field_descriptor.number, wire_type) ++ result_dict = {} ++ for tag_bytes, value in self.unknown_fields: ++ if tag_bytes == field_tag: ++ decoder = unittest_pb2.TestAllTypes._decoders_by_tag[tag_bytes][0] ++ decoder(value, 0, len(value), self.all_fields, result_dict) ++ return result_dict[field_descriptor] ++ ++ def testEnum(self): ++ value = self.GetField('optional_nested_enum') ++ self.assertEqual(self.all_fields.optional_nested_enum, value) ++ ++ def testRepeatedEnum(self): ++ value = self.GetField('repeated_nested_enum') ++ self.assertEqual(self.all_fields.repeated_nested_enum, value) ++ ++ def testVarint(self): ++ value = self.GetField('optional_int32') ++ self.assertEqual(self.all_fields.optional_int32, value) ++ ++ def testFixed32(self): ++ value = self.GetField('optional_fixed32') ++ self.assertEqual(self.all_fields.optional_fixed32, 
value) ++ ++ def testFixed64(self): ++ value = self.GetField('optional_fixed64') ++ self.assertEqual(self.all_fields.optional_fixed64, value) ++ ++ def testLengthDelimited(self): ++ value = self.GetField('optional_string') ++ self.assertEqual(self.all_fields.optional_string, value) ++ ++ def testGroup(self): ++ value = self.GetField('optionalgroup') ++ self.assertEqual(self.all_fields.optionalgroup, value) ++ ++ def testSerialize(self): ++ data = self.empty_message.SerializeToString() ++ ++ # Don't use assertEqual because we don't want to dump raw binary data to ++ # stdout. ++ self.assertTrue(data == self.all_fields_data) ++ ++ def testCopyFrom(self): ++ message = unittest_pb2.TestEmptyMessage() ++ message.CopyFrom(self.empty_message) ++ self.assertEqual(self.unknown_fields, message._unknown_fields) ++ ++ def testMergeFrom(self): ++ message = unittest_pb2.TestAllTypes() ++ message.optional_int32 = 1 ++ message.optional_uint32 = 2 ++ source = unittest_pb2.TestEmptyMessage() ++ source.ParseFromString(message.SerializeToString()) ++ ++ message.ClearField('optional_int32') ++ message.optional_int64 = 3 ++ message.optional_uint32 = 4 ++ destination = unittest_pb2.TestEmptyMessage() ++ destination.ParseFromString(message.SerializeToString()) ++ unknown_fields = destination._unknown_fields[:] ++ ++ destination.MergeFrom(source) ++ self.assertEqual(unknown_fields + source._unknown_fields, ++ destination._unknown_fields) ++ ++ def testClear(self): ++ self.empty_message.Clear() ++ self.assertEqual(0, len(self.empty_message._unknown_fields)) ++ ++ def testByteSize(self): ++ self.assertEqual(self.all_fields.ByteSize(), self.empty_message.ByteSize()) ++ ++ def testUnknownExtensions(self): ++ message = unittest_pb2.TestEmptyMessageWithExtensions() ++ message.ParseFromString(self.all_fields_data) ++ self.assertEqual(self.empty_message._unknown_fields, ++ message._unknown_fields) ++ ++ def testListFields(self): ++ # Make sure ListFields doesn't return unknown fields. 
++ self.assertEqual(0, len(self.empty_message.ListFields())) ++ ++ def testSerializeMessageSetWireFormatUnknownExtension(self): ++ # Create a message using the message set wire format with an unknown ++ # message. ++ raw = unittest_mset_pb2.RawMessageSet() ++ ++ # Add an unknown extension. ++ item = raw.item.add() ++ item.type_id = 1545009 ++ message1 = unittest_mset_pb2.TestMessageSetExtension1() ++ message1.i = 12345 ++ item.message = message1.SerializeToString() ++ ++ serialized = raw.SerializeToString() ++ ++ # Parse message using the message set wire format. ++ proto = unittest_mset_pb2.TestMessageSet() ++ proto.MergeFromString(serialized) ++ ++ # Verify that the unknown extension is serialized unchanged ++ reserialized = proto.SerializeToString() ++ new_raw = unittest_mset_pb2.RawMessageSet() ++ new_raw.MergeFromString(reserialized) ++ self.assertEqual(raw, new_raw) ++ ++ def testEquals(self): ++ message = unittest_pb2.TestEmptyMessage() ++ message.ParseFromString(self.all_fields_data) ++ self.assertEqual(self.empty_message, message) ++ ++ self.all_fields.ClearField('optional_string') ++ message.ParseFromString(self.all_fields.SerializeToString()) ++ self.assertNotEqual(self.empty_message, message) ++ ++ ++class UnknownFieldsTest(basetest.TestCase): ++ ++ def setUp(self): ++ self.descriptor = missing_enum_values_pb2.TestEnumValues.DESCRIPTOR ++ ++ self.message = missing_enum_values_pb2.TestEnumValues() ++ self.message.optional_nested_enum = ( ++ missing_enum_values_pb2.TestEnumValues.ZERO) ++ self.message.repeated_nested_enum.extend([ ++ missing_enum_values_pb2.TestEnumValues.ZERO, ++ missing_enum_values_pb2.TestEnumValues.ONE, ++ ]) ++ self.message.packed_nested_enum.extend([ ++ missing_enum_values_pb2.TestEnumValues.ZERO, ++ missing_enum_values_pb2.TestEnumValues.ONE, ++ ]) ++ self.message_data = self.message.SerializeToString() ++ self.missing_message = missing_enum_values_pb2.TestMissingEnumValues() ++ 
self.missing_message.ParseFromString(self.message_data) ++ self.unknown_fields = self.missing_message._unknown_fields ++ ++ def GetField(self, name): ++ field_descriptor = self.descriptor.fields_by_name[name] ++ wire_type = type_checkers.FIELD_TYPE_TO_WIRE_TYPE[field_descriptor.type] ++ field_tag = encoder.TagBytes(field_descriptor.number, wire_type) ++ result_dict = {} ++ for tag_bytes, value in self.unknown_fields: ++ if tag_bytes == field_tag: ++ decoder = missing_enum_values_pb2.TestEnumValues._decoders_by_tag[ ++ tag_bytes][0] ++ decoder(value, 0, len(value), self.message, result_dict) ++ return result_dict[field_descriptor] ++ ++ def testUnknownEnumValue(self): ++ self.assertFalse(self.missing_message.HasField('optional_nested_enum')) ++ value = self.GetField('optional_nested_enum') ++ self.assertEqual(self.message.optional_nested_enum, value) ++ ++ def testUnknownRepeatedEnumValue(self): ++ value = self.GetField('repeated_nested_enum') ++ self.assertEqual(self.message.repeated_nested_enum, value) ++ ++ def testUnknownPackedEnumValue(self): ++ value = self.GetField('packed_nested_enum') ++ self.assertEqual(self.message.packed_nested_enum, value) ++ ++ def testRoundTrip(self): ++ new_message = missing_enum_values_pb2.TestEnumValues() ++ new_message.ParseFromString(self.missing_message.SerializeToString()) ++ self.assertEqual(self.message, new_message) ++ ++ ++if __name__ == '__main__': ++ basetest.main() +--- /dev/null ++++ protobuf-2.6.1/python3/google/protobuf/internal/wire_format.py +@@ -0,0 +1,268 @@ ++# Protocol Buffers - Google's data interchange format ++# Copyright 2008 Google Inc. All rights reserved. ++# https://developers.google.com/protocol-buffers/ ++# ++# Redistribution and use in source and binary forms, with or without ++# modification, are permitted provided that the following conditions are ++# met: ++# ++# * Redistributions of source code must retain the above copyright ++# notice, this list of conditions and the following disclaimer. 
++# * Redistributions in binary form must reproduce the above ++# copyright notice, this list of conditions and the following disclaimer ++# in the documentation and/or other materials provided with the ++# distribution. ++# * Neither the name of Google Inc. nor the names of its ++# contributors may be used to endorse or promote products derived from ++# this software without specific prior written permission. ++# ++# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS ++# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT ++# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR ++# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT ++# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, ++# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT ++# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, ++# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY ++# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT ++# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE ++# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. ++ ++"""Constants and static functions to support protocol buffer wire format.""" ++ ++__author__ = 'robinson@google.com (Will Robinson)' ++ ++import struct ++from google.protobuf import descriptor ++from google.protobuf import message ++ ++ ++TAG_TYPE_BITS = 3 # Number of bits used to hold type info in a proto tag. ++TAG_TYPE_MASK = (1 << TAG_TYPE_BITS) - 1 # 0x7 ++ ++# These numbers identify the wire type of a protocol buffer value. ++# We use the least-significant TAG_TYPE_BITS bits of the varint-encoded ++# tag-and-type to store one of these WIRETYPE_* constants. ++# These values must match WireType enum in google/protobuf/wire_format.h. 
++WIRETYPE_VARINT = 0
++WIRETYPE_FIXED64 = 1
++WIRETYPE_LENGTH_DELIMITED = 2
++WIRETYPE_START_GROUP = 3
++WIRETYPE_END_GROUP = 4
++WIRETYPE_FIXED32 = 5
++_WIRETYPE_MAX = 5
++
++
++# Bounds for various integer types.
++INT32_MAX = int((1 << 31) - 1)
++INT32_MIN = int(-(1 << 31))
++UINT32_MAX = (1 << 32) - 1
++
++INT64_MAX = (1 << 63) - 1
++INT64_MIN = -(1 << 63)
++UINT64_MAX = (1 << 64) - 1
++
++# "struct" format strings that will encode/decode the specified formats.
++FORMAT_UINT32_LITTLE_ENDIAN = '<I'
++FORMAT_UINT64_LITTLE_ENDIAN = '<Q'
++FORMAT_FLOAT_LITTLE_ENDIAN = '<f'
++FORMAT_DOUBLE_LITTLE_ENDIAN = '<d'
++
++
++# We'll have to provide alternate implementations of AppendLittleEndian*() on
++# any architectures where these checks fail.
++if struct.calcsize(FORMAT_UINT32_LITTLE_ENDIAN) != 4:
++  raise AssertionError('Format "I" is not a 32-bit number.')
++if struct.calcsize(FORMAT_UINT64_LITTLE_ENDIAN) != 8:
++  raise AssertionError('Format "Q" is not a 64-bit number.')
++
++
++def PackTag(field_number, wire_type):
++  """Returns an unsigned 32-bit integer that encodes the field number and
++  wire type information in standard protocol message wire format.
++
++  Args:
++    field_number: Expected field number.  Must be >= 1.
++    wire_type: One of the WIRETYPE_* constants.
++  """
++  if not 0 <= wire_type <= _WIRETYPE_MAX:
++    raise message.EncodeError('Unknown wire type: %d' % wire_type)
++  return (field_number << TAG_TYPE_BITS) | wire_type
++
++
++def UnpackTag(tag):
++  """The inverse of PackTag().  Given an unsigned 32-bit number,
++  returns a (field_number, wire_type) tuple.
++  """
++  return (tag >> TAG_TYPE_BITS), (tag & TAG_TYPE_MASK)
++
++
++def ZigZagEncode(value):
++  """ZigZag Transform:  Encodes signed integers so that they can be
++  effectively used with varint encoding.  See wire_format.h for
++  more details.
++  """
++  if value >= 0:
++    return value << 1
++  return (value << 1) ^ (~0)
++
++
++def ZigZagDecode(value):
++  """Inverse of ZigZagEncode()."""
++  if not value & 0x1:
++    return value >> 1
++  return (value >> 1) ^ (~0)
++
++
++
++# The *ByteSize() functions below return the number of bytes required to
++# serialize "field number + type" information and then serialize the value.
++
++
++def Int32ByteSize(field_number, int32):
++  return Int64ByteSize(field_number, int32)
++
++
++def Int32ByteSizeNoTag(int32):
++  return _VarUInt64ByteSizeNoTag(0xffffffffffffffff & int32)
++
++
++def Int64ByteSize(field_number, int64):
++  # Have to convert to uint before calling UInt64ByteSize().
++ return UInt64ByteSize(field_number, 0xffffffffffffffff & int64) ++ ++ ++def UInt32ByteSize(field_number, uint32): ++ return UInt64ByteSize(field_number, uint32) ++ ++ ++def UInt64ByteSize(field_number, uint64): ++ return TagByteSize(field_number) + _VarUInt64ByteSizeNoTag(uint64) ++ ++ ++def SInt32ByteSize(field_number, int32): ++ return UInt32ByteSize(field_number, ZigZagEncode(int32)) ++ ++ ++def SInt64ByteSize(field_number, int64): ++ return UInt64ByteSize(field_number, ZigZagEncode(int64)) ++ ++ ++def Fixed32ByteSize(field_number, fixed32): ++ return TagByteSize(field_number) + 4 ++ ++ ++def Fixed64ByteSize(field_number, fixed64): ++ return TagByteSize(field_number) + 8 ++ ++ ++def SFixed32ByteSize(field_number, sfixed32): ++ return TagByteSize(field_number) + 4 ++ ++ ++def SFixed64ByteSize(field_number, sfixed64): ++ return TagByteSize(field_number) + 8 ++ ++ ++def FloatByteSize(field_number, flt): ++ return TagByteSize(field_number) + 4 ++ ++ ++def DoubleByteSize(field_number, double): ++ return TagByteSize(field_number) + 8 ++ ++ ++def BoolByteSize(field_number, b): ++ return TagByteSize(field_number) + 1 ++ ++ ++def EnumByteSize(field_number, enum): ++ return UInt32ByteSize(field_number, enum) ++ ++ ++def StringByteSize(field_number, string): ++ return BytesByteSize(field_number, string.encode('utf-8')) ++ ++ ++def BytesByteSize(field_number, b): ++ return (TagByteSize(field_number) ++ + _VarUInt64ByteSizeNoTag(len(b)) ++ + len(b)) ++ ++ ++def GroupByteSize(field_number, message): ++ return (2 * TagByteSize(field_number) # START and END group. ++ + message.ByteSize()) ++ ++ ++def MessageByteSize(field_number, message): ++ return (TagByteSize(field_number) ++ + _VarUInt64ByteSizeNoTag(message.ByteSize()) ++ + message.ByteSize()) ++ ++ ++def MessageSetItemByteSize(field_number, msg): ++ # First compute the sizes of the tags. 
++ # There are 2 tags for the beginning and ending of the repeated group, that ++ # is field number 1, one with field number 2 (type_id) and one with field ++ # number 3 (message). ++ total_size = (2 * TagByteSize(1) + TagByteSize(2) + TagByteSize(3)) ++ ++ # Add the number of bytes for type_id. ++ total_size += _VarUInt64ByteSizeNoTag(field_number) ++ ++ message_size = msg.ByteSize() ++ ++ # The number of bytes for encoding the length of the message. ++ total_size += _VarUInt64ByteSizeNoTag(message_size) ++ ++ # The size of the message. ++ total_size += message_size ++ return total_size ++ ++ ++def TagByteSize(field_number): ++ """Returns the bytes required to serialize a tag with this field number.""" ++ # Just pass in type 0, since the type won't affect the tag+type size. ++ return _VarUInt64ByteSizeNoTag(PackTag(field_number, 0)) ++ ++ ++# Private helper function for the *ByteSize() functions above. ++ ++def _VarUInt64ByteSizeNoTag(uint64): ++ """Returns the number of bytes required to serialize a single varint ++ using boundary value comparisons. (unrolled loop optimization -WPierce) ++ uint64 must be unsigned. ++ """ ++ if uint64 <= 0x7f: return 1 ++ if uint64 <= 0x3fff: return 2 ++ if uint64 <= 0x1fffff: return 3 ++ if uint64 <= 0xfffffff: return 4 ++ if uint64 <= 0x7ffffffff: return 5 ++ if uint64 <= 0x3ffffffffff: return 6 ++ if uint64 <= 0x1ffffffffffff: return 7 ++ if uint64 <= 0xffffffffffffff: return 8 ++ if uint64 <= 0x7fffffffffffffff: return 9 ++ if uint64 > UINT64_MAX: ++ raise message.EncodeError('Value out of range: %d' % uint64) ++ return 10 ++ ++ ++NON_PACKABLE_TYPES = ( ++ descriptor.FieldDescriptor.TYPE_STRING, ++ descriptor.FieldDescriptor.TYPE_GROUP, ++ descriptor.FieldDescriptor.TYPE_MESSAGE, ++ descriptor.FieldDescriptor.TYPE_BYTES ++) ++ ++ ++def IsTypePackable(field_type): ++ """Return true iff packable = true is valid for fields of this type. ++ ++ Args: ++ field_type: a FieldDescriptor::Type value. 
++ ++ Returns: ++ True iff fields of this type are packable. ++ """ ++ return field_type not in NON_PACKABLE_TYPES +--- /dev/null ++++ protobuf-2.6.1/python3/google/protobuf/internal/wire_format_test.py +@@ -0,0 +1,253 @@ ++#! /usr/bin/python ++# ++# Protocol Buffers - Google's data interchange format ++# Copyright 2008 Google Inc. All rights reserved. ++# https://developers.google.com/protocol-buffers/ ++# ++# Redistribution and use in source and binary forms, with or without ++# modification, are permitted provided that the following conditions are ++# met: ++# ++# * Redistributions of source code must retain the above copyright ++# notice, this list of conditions and the following disclaimer. ++# * Redistributions in binary form must reproduce the above ++# copyright notice, this list of conditions and the following disclaimer ++# in the documentation and/or other materials provided with the ++# distribution. ++# * Neither the name of Google Inc. nor the names of its ++# contributors may be used to endorse or promote products derived from ++# this software without specific prior written permission. ++# ++# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS ++# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT ++# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR ++# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT ++# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, ++# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT ++# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, ++# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY ++# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT ++# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE ++# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
++ ++"""Test for google.protobuf.internal.wire_format.""" ++ ++__author__ = 'robinson@google.com (Will Robinson)' ++ ++from google.apputils import basetest ++from google.protobuf import message ++from google.protobuf.internal import wire_format ++ ++ ++class WireFormatTest(basetest.TestCase): ++ ++ def testPackTag(self): ++ field_number = 0xabc ++ tag_type = 2 ++ self.assertEqual((field_number << 3) | tag_type, ++ wire_format.PackTag(field_number, tag_type)) ++ PackTag = wire_format.PackTag ++ # Number too high. ++ self.assertRaises(message.EncodeError, PackTag, field_number, 6) ++ # Number too low. ++ self.assertRaises(message.EncodeError, PackTag, field_number, -1) ++ ++ def testUnpackTag(self): ++ # Test field numbers that will require various varint sizes. ++ for expected_field_number in (1, 15, 16, 2047, 2048): ++ for expected_wire_type in range(6): # Highest-numbered wiretype is 5. ++ field_number, wire_type = wire_format.UnpackTag( ++ wire_format.PackTag(expected_field_number, expected_wire_type)) ++ self.assertEqual(expected_field_number, field_number) ++ self.assertEqual(expected_wire_type, wire_type) ++ ++ self.assertRaises(TypeError, wire_format.UnpackTag, None) ++ self.assertRaises(TypeError, wire_format.UnpackTag, 'abc') ++ self.assertRaises(TypeError, wire_format.UnpackTag, 0.0) ++ self.assertRaises(TypeError, wire_format.UnpackTag, object()) ++ ++ def testZigZagEncode(self): ++ Z = wire_format.ZigZagEncode ++ self.assertEqual(0, Z(0)) ++ self.assertEqual(1, Z(-1)) ++ self.assertEqual(2, Z(1)) ++ self.assertEqual(3, Z(-2)) ++ self.assertEqual(4, Z(2)) ++ self.assertEqual(0xfffffffe, Z(0x7fffffff)) ++ self.assertEqual(0xffffffff, Z(-0x80000000)) ++ self.assertEqual(0xfffffffffffffffe, Z(0x7fffffffffffffff)) ++ self.assertEqual(0xffffffffffffffff, Z(-0x8000000000000000)) ++ ++ self.assertRaises(TypeError, Z, None) ++ self.assertRaises(TypeError, Z, 'abcd') ++ self.assertRaises(TypeError, Z, 0.0) ++ self.assertRaises(TypeError, Z, object()) ++ ++ def 
testZigZagDecode(self): ++ Z = wire_format.ZigZagDecode ++ self.assertEqual(0, Z(0)) ++ self.assertEqual(-1, Z(1)) ++ self.assertEqual(1, Z(2)) ++ self.assertEqual(-2, Z(3)) ++ self.assertEqual(2, Z(4)) ++ self.assertEqual(0x7fffffff, Z(0xfffffffe)) ++ self.assertEqual(-0x80000000, Z(0xffffffff)) ++ self.assertEqual(0x7fffffffffffffff, Z(0xfffffffffffffffe)) ++ self.assertEqual(-0x8000000000000000, Z(0xffffffffffffffff)) ++ ++ self.assertRaises(TypeError, Z, None) ++ self.assertRaises(TypeError, Z, 'abcd') ++ self.assertRaises(TypeError, Z, 0.0) ++ self.assertRaises(TypeError, Z, object()) ++ ++ def NumericByteSizeTestHelper(self, byte_size_fn, value, expected_value_size): ++ # Use field numbers that cause various byte sizes for the tag information. ++ for field_number, tag_bytes in ((15, 1), (16, 2), (2047, 2), (2048, 3)): ++ expected_size = expected_value_size + tag_bytes ++ actual_size = byte_size_fn(field_number, value) ++ self.assertEqual(expected_size, actual_size, ++ 'byte_size_fn: %s, field_number: %d, value: %r\n' ++ 'Expected: %d, Actual: %d'% ( ++ byte_size_fn, field_number, value, expected_size, actual_size)) ++ ++ def testByteSizeFunctions(self): ++ # Test all numeric *ByteSize() functions. ++ NUMERIC_ARGS = [ ++ # Int32ByteSize(). ++ [wire_format.Int32ByteSize, 0, 1], ++ [wire_format.Int32ByteSize, 127, 1], ++ [wire_format.Int32ByteSize, 128, 2], ++ [wire_format.Int32ByteSize, -1, 10], ++ # Int64ByteSize(). ++ [wire_format.Int64ByteSize, 0, 1], ++ [wire_format.Int64ByteSize, 127, 1], ++ [wire_format.Int64ByteSize, 128, 2], ++ [wire_format.Int64ByteSize, -1, 10], ++ # UInt32ByteSize(). ++ [wire_format.UInt32ByteSize, 0, 1], ++ [wire_format.UInt32ByteSize, 127, 1], ++ [wire_format.UInt32ByteSize, 128, 2], ++ [wire_format.UInt32ByteSize, wire_format.UINT32_MAX, 5], ++ # UInt64ByteSize(). 
++ [wire_format.UInt64ByteSize, 0, 1], ++ [wire_format.UInt64ByteSize, 127, 1], ++ [wire_format.UInt64ByteSize, 128, 2], ++ [wire_format.UInt64ByteSize, wire_format.UINT64_MAX, 10], ++ # SInt32ByteSize(). ++ [wire_format.SInt32ByteSize, 0, 1], ++ [wire_format.SInt32ByteSize, -1, 1], ++ [wire_format.SInt32ByteSize, 1, 1], ++ [wire_format.SInt32ByteSize, -63, 1], ++ [wire_format.SInt32ByteSize, 63, 1], ++ [wire_format.SInt32ByteSize, -64, 1], ++ [wire_format.SInt32ByteSize, 64, 2], ++ # SInt64ByteSize(). ++ [wire_format.SInt64ByteSize, 0, 1], ++ [wire_format.SInt64ByteSize, -1, 1], ++ [wire_format.SInt64ByteSize, 1, 1], ++ [wire_format.SInt64ByteSize, -63, 1], ++ [wire_format.SInt64ByteSize, 63, 1], ++ [wire_format.SInt64ByteSize, -64, 1], ++ [wire_format.SInt64ByteSize, 64, 2], ++ # Fixed32ByteSize(). ++ [wire_format.Fixed32ByteSize, 0, 4], ++ [wire_format.Fixed32ByteSize, wire_format.UINT32_MAX, 4], ++ # Fixed64ByteSize(). ++ [wire_format.Fixed64ByteSize, 0, 8], ++ [wire_format.Fixed64ByteSize, wire_format.UINT64_MAX, 8], ++ # SFixed32ByteSize(). ++ [wire_format.SFixed32ByteSize, 0, 4], ++ [wire_format.SFixed32ByteSize, wire_format.INT32_MIN, 4], ++ [wire_format.SFixed32ByteSize, wire_format.INT32_MAX, 4], ++ # SFixed64ByteSize(). ++ [wire_format.SFixed64ByteSize, 0, 8], ++ [wire_format.SFixed64ByteSize, wire_format.INT64_MIN, 8], ++ [wire_format.SFixed64ByteSize, wire_format.INT64_MAX, 8], ++ # FloatByteSize(). ++ [wire_format.FloatByteSize, 0.0, 4], ++ [wire_format.FloatByteSize, 1000000000.0, 4], ++ [wire_format.FloatByteSize, -1000000000.0, 4], ++ # DoubleByteSize(). ++ [wire_format.DoubleByteSize, 0.0, 8], ++ [wire_format.DoubleByteSize, 1000000000.0, 8], ++ [wire_format.DoubleByteSize, -1000000000.0, 8], ++ # BoolByteSize(). ++ [wire_format.BoolByteSize, False, 1], ++ [wire_format.BoolByteSize, True, 1], ++ # EnumByteSize(). 
++ [wire_format.EnumByteSize, 0, 1], ++ [wire_format.EnumByteSize, 127, 1], ++ [wire_format.EnumByteSize, 128, 2], ++ [wire_format.EnumByteSize, wire_format.UINT32_MAX, 5], ++ ] ++ for args in NUMERIC_ARGS: ++ self.NumericByteSizeTestHelper(*args) ++ ++ # Test strings and bytes. ++ for byte_size_fn in (wire_format.StringByteSize, wire_format.BytesByteSize): ++ # 1 byte for tag, 1 byte for length, 3 bytes for contents. ++ self.assertEqual(5, byte_size_fn(10, 'abc')) ++ # 2 bytes for tag, 1 byte for length, 3 bytes for contents. ++ self.assertEqual(6, byte_size_fn(16, 'abc')) ++ # 2 bytes for tag, 2 bytes for length, 128 bytes for contents. ++ self.assertEqual(132, byte_size_fn(16, 'a' * 128)) ++ ++ # Test UTF-8 string byte size calculation. ++ # 1 byte for tag, 1 byte for length, 8 bytes for content. ++ self.assertEqual(10, wire_format.StringByteSize( ++ 5, b'\xd0\xa2\xd0\xb5\xd1\x81\xd1\x82'.decode('utf-8'))) ++ ++ class MockMessage(object): ++ def __init__(self, byte_size): ++ self.byte_size = byte_size ++ def ByteSize(self): ++ return self.byte_size ++ ++ message_byte_size = 10 ++ mock_message = MockMessage(byte_size=message_byte_size) ++ # Test groups. ++ # (2 * 1) bytes for begin and end tags, plus message_byte_size. ++ self.assertEqual(2 + message_byte_size, ++ wire_format.GroupByteSize(1, mock_message)) ++ # (2 * 2) bytes for begin and end tags, plus message_byte_size. ++ self.assertEqual(4 + message_byte_size, ++ wire_format.GroupByteSize(16, mock_message)) ++ ++ # Test messages. ++ # 1 byte for tag, plus 1 byte for length, plus contents. ++ self.assertEqual(2 + mock_message.byte_size, ++ wire_format.MessageByteSize(1, mock_message)) ++ # 2 bytes for tag, plus 1 byte for length, plus contents. ++ self.assertEqual(3 + mock_message.byte_size, ++ wire_format.MessageByteSize(16, mock_message)) ++ # 2 bytes for tag, plus 2 bytes for length, plus contents. 
++ mock_message.byte_size = 128 ++ self.assertEqual(4 + mock_message.byte_size, ++ wire_format.MessageByteSize(16, mock_message)) ++ ++ ++ # Test message set item byte size. ++ # 4 bytes for tags, plus 1 byte for length, plus 1 byte for type_id, ++ # plus contents. ++ mock_message.byte_size = 10 ++ self.assertEqual(mock_message.byte_size + 6, ++ wire_format.MessageSetItemByteSize(1, mock_message)) ++ ++ # 4 bytes for tags, plus 2 bytes for length, plus 1 byte for type_id, ++ # plus contents. ++ mock_message.byte_size = 128 ++ self.assertEqual(mock_message.byte_size + 7, ++ wire_format.MessageSetItemByteSize(1, mock_message)) ++ ++ # 4 bytes for tags, plus 2 bytes for length, plus 2 byte for type_id, ++ # plus contents. ++ self.assertEqual(mock_message.byte_size + 8, ++ wire_format.MessageSetItemByteSize(128, mock_message)) ++ ++ # Too-long varint. ++ self.assertRaises(message.EncodeError, ++ wire_format.UInt64ByteSize, 1, 1 << 128) ++ ++ ++if __name__ == '__main__': ++ basetest.main() +--- /dev/null ++++ protobuf-2.6.1/python3/google/protobuf/message.py +@@ -0,0 +1,284 @@ ++# Protocol Buffers - Google's data interchange format ++# Copyright 2008 Google Inc. All rights reserved. ++# https://developers.google.com/protocol-buffers/ ++# ++# Redistribution and use in source and binary forms, with or without ++# modification, are permitted provided that the following conditions are ++# met: ++# ++# * Redistributions of source code must retain the above copyright ++# notice, this list of conditions and the following disclaimer. ++# * Redistributions in binary form must reproduce the above ++# copyright notice, this list of conditions and the following disclaimer ++# in the documentation and/or other materials provided with the ++# distribution. ++# * Neither the name of Google Inc. nor the names of its ++# contributors may be used to endorse or promote products derived from ++# this software without specific prior written permission. 
++# ++# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS ++# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT ++# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR ++# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT ++# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, ++# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT ++# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, ++# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY ++# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT ++# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE ++# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. ++ ++# TODO(robinson): We should just make these methods all "pure-virtual" and move ++# all implementation out, into reflection.py for now. ++ ++ ++"""Contains an abstract base class for protocol messages.""" ++ ++__author__ = 'robinson@google.com (Will Robinson)' ++ ++ ++class Error(Exception): pass ++class DecodeError(Error): pass ++class EncodeError(Error): pass ++ ++ ++class Message(object): ++ ++ """Abstract base class for protocol messages. ++ ++ Protocol message classes are almost always generated by the protocol ++ compiler. These generated types subclass Message and implement the methods ++ shown below. ++ ++ TODO(robinson): Link to an HTML document here. ++ ++ TODO(robinson): Document that instances of this class will also ++ have an Extensions attribute with __getitem__ and __setitem__. ++ Again, not sure how to best convey this. ++ ++ TODO(robinson): Document that the class must also have a static ++ RegisterExtension(extension_field) method. ++ Not sure how to best express at this point. ++ """ ++ ++ # TODO(robinson): Document these fields and methods. 
++ ++ __slots__ = [] ++ ++ DESCRIPTOR = None ++ ++ def __deepcopy__(self, memo=None): ++ clone = type(self)() ++ clone.MergeFrom(self) ++ return clone ++ ++ def __eq__(self, other_msg): ++ """Recursively compares two messages by value and structure.""" ++ raise NotImplementedError ++ ++ def __ne__(self, other_msg): ++ # Can't just say self != other_msg, since that would infinitely recurse. :) ++ return not self == other_msg ++ ++ def __hash__(self): ++ raise TypeError('unhashable object') ++ ++ def __str__(self): ++ """Outputs a human-readable representation of the message.""" ++ raise NotImplementedError ++ ++ def __unicode__(self): ++ """Outputs a human-readable representation of the message.""" ++ raise NotImplementedError ++ ++ def MergeFrom(self, other_msg): ++ """Merges the contents of the specified message into current message. ++ ++ This method merges the contents of the specified message into the current ++ message. Singular fields that are set in the specified message overwrite ++ the corresponding fields in the current message. Repeated fields are ++ appended. Singular sub-messages and groups are recursively merged. ++ ++ Args: ++ other_msg: Message to merge into the current message. ++ """ ++ raise NotImplementedError ++ ++ def CopyFrom(self, other_msg): ++ """Copies the content of the specified message into the current message. ++ ++ The method clears the current message and then merges the specified ++ message using MergeFrom. ++ ++ Args: ++ other_msg: Message to copy into the current one. ++ """ ++ if self is other_msg: ++ return ++ self.Clear() ++ self.MergeFrom(other_msg) ++ ++ def Clear(self): ++ """Clears all data that was set in the message.""" ++ raise NotImplementedError ++ ++ def SetInParent(self): ++ """Mark this as present in the parent. ++ ++ This normally happens automatically when you assign a field of a ++ sub-message, but sometimes you want to make the sub-message ++ present while keeping it empty. 
If you find yourself using this, ++ you may want to reconsider your design.""" ++ raise NotImplementedError ++ ++ def IsInitialized(self): ++ """Checks if the message is initialized. ++ ++ Returns: ++ The method returns True if the message is initialized (i.e. all of its ++ required fields are set). ++ """ ++ raise NotImplementedError ++ ++ # TODO(robinson): MergeFromString() should probably return None and be ++ # implemented in terms of a helper that returns the # of bytes read. Our ++ # deserialization routines would use the helper when recursively ++ # deserializing, but the end user would almost always just want the no-return ++ # MergeFromString(). ++ ++ def MergeFromString(self, serialized): ++ """Merges serialized protocol buffer data into this message. ++ ++ When we find a field in |serialized| that is already present ++ in this message: ++ - If it's a "repeated" field, we append to the end of our list. ++ - Else, if it's a scalar, we overwrite our field. ++ - Else, (it's a nonrepeated composite), we recursively merge ++ into the existing composite. ++ ++ TODO(robinson): Document handling of unknown fields. ++ ++ Args: ++ serialized: Any object that allows us to call buffer(serialized) ++ to access a string of bytes using the buffer interface. ++ ++ TODO(robinson): When we switch to a helper, this will return None. ++ ++ Returns: ++ The number of bytes read from |serialized|. ++ For non-group messages, this will always be len(serialized), ++ but for messages which are actually groups, this will ++ generally be less than len(serialized), since we must ++ stop when we reach an END_GROUP tag. Note that if ++ we *do* stop because of an END_GROUP tag, the number ++ of bytes returned does not include the bytes ++ for the END_GROUP tag information. ++ """ ++ raise NotImplementedError ++ ++ def ParseFromString(self, serialized): ++ """Parse serialized protocol buffer data into this message. 
++ ++ Like MergeFromString(), except we clear the object first and ++ do not return the value that MergeFromString returns. ++ """ ++ self.Clear() ++ self.MergeFromString(serialized) ++ ++ def SerializeToString(self): ++ """Serializes the protocol message to a binary string. ++ ++ Returns: ++ A binary string representation of the message if all of the required ++ fields in the message are set (i.e. the message is initialized). ++ ++ Raises: ++ message.EncodeError if the message isn't initialized. ++ """ ++ raise NotImplementedError ++ ++ def SerializePartialToString(self): ++ """Serializes the protocol message to a binary string. ++ ++ This method is similar to SerializeToString but doesn't check if the ++ message is initialized. ++ ++ Returns: ++ A string representation of the partial message. ++ """ ++ raise NotImplementedError ++ ++ # TODO(robinson): Decide whether we like these better ++ # than auto-generated has_foo() and clear_foo() methods ++ # on the instances themselves. This way is less consistent ++ # with C++, but it makes reflection-type access easier and ++ # reduces the number of magically autogenerated things. ++ # ++ # TODO(robinson): Be sure to document (and test) exactly ++ # which field names are accepted here. Are we case-sensitive? ++ # What do we do with fields that share names with Python keywords ++ # like 'lambda' and 'yield'? ++ # ++ # nnorwitz says: ++ # """ ++ # Typically (in python), an underscore is appended to names that are ++ # keywords. So they would become lambda_ or yield_. ++ # """ ++ def ListFields(self): ++ """Returns a list of (FieldDescriptor, value) tuples for all ++ fields in the message which are not empty. A singular field is non-empty ++ if HasField() would return true, and a repeated field is non-empty if ++ it contains at least one element. The fields are ordered by field ++ number""" ++ raise NotImplementedError ++ ++ def HasField(self, field_name): ++ """Checks if a certain field is set for the message. 
Note if the ++ field_name is not defined in the message descriptor, ValueError will be ++ raised.""" ++ raise NotImplementedError ++ ++ def ClearField(self, field_name): ++ raise NotImplementedError ++ ++ def HasExtension(self, extension_handle): ++ raise NotImplementedError ++ ++ def ClearExtension(self, extension_handle): ++ raise NotImplementedError ++ ++ def ByteSize(self): ++ """Returns the serialized size of this message. ++ Recursively calls ByteSize() on all contained messages. ++ """ ++ raise NotImplementedError ++ ++ def _SetListener(self, message_listener): ++ """Internal method used by the protocol message implementation. ++ Clients should not call this directly. ++ ++ Sets a listener that this message will call on certain state transitions. ++ ++ The purpose of this method is to register back-edges from children to ++ parents at runtime, for the purpose of setting "has" bits and ++ byte-size-dirty bits in the parent and ancestor objects whenever a child or ++ descendant object is modified. ++ ++ If the client wants to disconnect this Message from the object tree, she ++ explicitly sets callback to None. ++ ++ If message_listener is None, unregisters any existing listener. Otherwise, ++ message_listener must implement the MessageListener interface in ++ internal/message_listener.py, and we discard any listener registered ++ via a previous _SetListener() call. ++ """ ++ raise NotImplementedError ++ ++ def __getstate__(self): ++ """Support the pickle protocol.""" ++ return dict(serialized=self.SerializePartialToString()) ++ ++ def __setstate__(self, state): ++ """Support the pickle protocol.""" ++ self.__init__() ++ self.ParseFromString(state['serialized']) +--- /dev/null ++++ protobuf-2.6.1/python3/google/protobuf/message_factory.py +@@ -0,0 +1,151 @@ ++# Protocol Buffers - Google's data interchange format ++# Copyright 2008 Google Inc. All rights reserved. 
++# https://developers.google.com/protocol-buffers/ ++# ++# Redistribution and use in source and binary forms, with or without ++# modification, are permitted provided that the following conditions are ++# met: ++# ++# * Redistributions of source code must retain the above copyright ++# notice, this list of conditions and the following disclaimer. ++# * Redistributions in binary form must reproduce the above ++# copyright notice, this list of conditions and the following disclaimer ++# in the documentation and/or other materials provided with the ++# distribution. ++# * Neither the name of Google Inc. nor the names of its ++# contributors may be used to endorse or promote products derived from ++# this software without specific prior written permission. ++# ++# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS ++# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT ++# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR ++# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT ++# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, ++# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT ++# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, ++# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY ++# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT ++# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE ++# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. ++ ++# Copyright 2012 Google Inc. All Rights Reserved. ++ ++"""Provides a factory class for generating dynamic messages. 
++ ++The easiest way to use this class is if you have access to the FileDescriptor ++protos containing the messages you want to create you can just do the following: ++ ++message_classes = message_factory.GetMessages(iterable_of_file_descriptors) ++my_proto_instance = message_classes['some.proto.package.MessageName']() ++""" ++ ++__author__ = 'matthewtoia@google.com (Matt Toia)' ++ ++from google.protobuf import descriptor_database ++from google.protobuf import descriptor_pool ++from google.protobuf import message ++from google.protobuf import reflection ++ ++ ++class MessageFactory(object): ++ """Factory for creating Proto2 messages from descriptors in a pool.""" ++ ++ def __init__(self, pool=None): ++ """Initializes a new factory.""" ++ self.pool = (pool or descriptor_pool.DescriptorPool( ++ descriptor_database.DescriptorDatabase())) ++ ++ # local cache of all classes built from protobuf descriptors ++ self._classes = {} ++ ++ def GetPrototype(self, descriptor): ++ """Builds a proto2 message class based on the passed in descriptor. ++ ++ Passing a descriptor with a fully qualified name matching a previous ++ invocation will cause the same class to be returned. ++ ++ Args: ++ descriptor: The descriptor to build from. ++ ++ Returns: ++ A class describing the passed in descriptor. ++ """ ++ if descriptor.full_name not in self._classes: ++ descriptor_name = descriptor.name ++ if str is bytes: # PY2 ++ descriptor_name = descriptor.name.encode('ascii', 'ignore') ++ result_class = reflection.GeneratedProtocolMessageType( ++ descriptor_name, ++ (message.Message,), ++ {'DESCRIPTOR': descriptor, '__module__': None}) ++ # If module not set, it wrongly points to the reflection.py module. 
++ self._classes[descriptor.full_name] = result_class ++ for field in descriptor.fields: ++ if field.message_type: ++ self.GetPrototype(field.message_type) ++ for extension in result_class.DESCRIPTOR.extensions: ++ if extension.containing_type.full_name not in self._classes: ++ self.GetPrototype(extension.containing_type) ++ extended_class = self._classes[extension.containing_type.full_name] ++ extended_class.RegisterExtension(extension) ++ return self._classes[descriptor.full_name] ++ ++ def GetMessages(self, files): ++ """Gets all the messages from a specified file. ++ ++ This will find and resolve dependencies, failing if the descriptor ++ pool cannot satisfy them. ++ ++ Args: ++ files: The file names to extract messages from. ++ ++ Returns: ++ A dictionary mapping proto names to the message classes. This will include ++ any dependent messages as well as any messages defined in the same file as ++ a specified message. ++ """ ++ result = {} ++ for file_name in files: ++ file_desc = self.pool.FindFileByName(file_name) ++ for name, msg in file_desc.message_types_by_name.items(): ++ if file_desc.package: ++ full_name = '.'.join([file_desc.package, name]) ++ else: ++ full_name = msg.name ++ result[full_name] = self.GetPrototype( ++ self.pool.FindMessageTypeByName(full_name)) ++ ++ # While the extension FieldDescriptors are created by the descriptor pool, ++ # the python classes created in the factory need them to be registered ++ # explicitly, which is done below. ++ # ++ # The call to RegisterExtension will specifically check if the ++ # extension was already registered on the object and either ++ # ignore the registration if the original was the same, or raise ++ # an error if they were different. 
++ ++ for name, extension in file_desc.extensions_by_name.items(): ++ if extension.containing_type.full_name not in self._classes: ++ self.GetPrototype(extension.containing_type) ++ extended_class = self._classes[extension.containing_type.full_name] ++ extended_class.RegisterExtension(extension) ++ return result ++ ++ ++_FACTORY = MessageFactory() ++ ++ ++def GetMessages(file_protos): ++ """Builds a dictionary of all the messages available in a set of files. ++ ++ Args: ++ file_protos: A sequence of file protos to build messages out of. ++ ++ Returns: ++ A dictionary mapping proto names to the message classes. This will include ++ any dependent messages as well as any messages defined in the same file as ++ a specified message. ++ """ ++ for file_proto in file_protos: ++ _FACTORY.pool.Add(file_proto) ++ return _FACTORY.GetMessages([file_proto.name for file_proto in file_protos]) +--- /dev/null ++++ protobuf-2.6.1/python3/google/protobuf/pyext/README +@@ -0,0 +1,6 @@ ++This is the 'v2' C++ implementation for python proto2. ++ ++It is active when: ++ ++PROTOCOL_BUFFERS_PYTHON_IMPLEMENTATION=cpp ++PROTOCOL_BUFFERS_PYTHON_IMPLEMENTATION_VERSION=2 +--- /dev/null ++++ protobuf-2.6.1/python3/google/protobuf/pyext/cpp_message.py +@@ -0,0 +1,61 @@ ++# Protocol Buffers - Google's data interchange format ++# Copyright 2008 Google Inc. All rights reserved. ++# https://developers.google.com/protocol-buffers/ ++# ++# Redistribution and use in source and binary forms, with or without ++# modification, are permitted provided that the following conditions are ++# met: ++# ++# * Redistributions of source code must retain the above copyright ++# notice, this list of conditions and the following disclaimer. ++# * Redistributions in binary form must reproduce the above ++# copyright notice, this list of conditions and the following disclaimer ++# in the documentation and/or other materials provided with the ++# distribution. ++# * Neither the name of Google Inc. 
nor the names of its ++# contributors may be used to endorse or promote products derived from ++# this software without specific prior written permission. ++# ++# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS ++# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT ++# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR ++# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT ++# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, ++# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT ++# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, ++# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY ++# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT ++# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE ++# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. ++ ++"""Protocol message implementation hooks for C++ implementation. ++ ++Contains helper functions used to create protocol message classes from ++Descriptor objects at runtime backed by the protocol buffer C++ API. ++""" ++ ++__author__ = 'tibell@google.com (Johan Tibell)' ++ ++from google.protobuf.pyext import _message ++from google.protobuf import message ++ ++ ++def NewMessage(bases, message_descriptor, dictionary): ++ """Creates a new protocol message *class*.""" ++ new_bases = [] ++ for base in bases: ++ if base is message.Message: ++ # _message.Message must come before message.Message as it ++ # overrides methods in that class. 
++ new_bases.append(_message.Message) ++ new_bases.append(base) ++ return tuple(new_bases) ++ ++ ++def InitMessage(message_descriptor, cls): ++ """Constructs a new message instance (called before instance's __init__).""" ++ ++ def SubInit(self, **kwargs): ++ super(cls, self).__init__(message_descriptor, **kwargs) ++ cls.__init__ = SubInit ++ cls.AddDescriptors(message_descriptor) +--- /dev/null ++++ protobuf-2.6.1/python3/google/protobuf/pyext/descriptor.cc +@@ -0,0 +1,357 @@ ++// Protocol Buffers - Google's data interchange format ++// Copyright 2008 Google Inc. All rights reserved. ++// https://developers.google.com/protocol-buffers/ ++// ++// Redistribution and use in source and binary forms, with or without ++// modification, are permitted provided that the following conditions are ++// met: ++// ++// * Redistributions of source code must retain the above copyright ++// notice, this list of conditions and the following disclaimer. ++// * Redistributions in binary form must reproduce the above ++// copyright notice, this list of conditions and the following disclaimer ++// in the documentation and/or other materials provided with the ++// distribution. ++// * Neither the name of Google Inc. nor the names of its ++// contributors may be used to endorse or promote products derived from ++// this software without specific prior written permission. ++// ++// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS ++// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT ++// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR ++// A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT ++// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, ++// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT ++// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, ++// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY ++// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT ++// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE ++// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. ++ ++// Author: petar@google.com (Petar Petrov) ++ ++#include ++#include ++ ++#include ++#include ++#include ++ ++#define C(str) const_cast(str) ++ ++#if PY_MAJOR_VERSION >= 3 ++ #define PyString_FromStringAndSize PyUnicode_FromStringAndSize ++ #define PyInt_FromLong PyLong_FromLong ++ #if PY_VERSION_HEX < 0x03030000 ++ #error "Python 3.0 - 3.2 are not supported." ++ #else ++ #define PyString_AsString(ob) \ ++ (PyUnicode_Check(ob)? PyUnicode_AsUTF8(ob): PyBytes_AS_STRING(ob)) ++ #endif ++#endif ++ ++namespace google { ++namespace protobuf { ++namespace python { ++ ++ ++#ifndef PyVarObject_HEAD_INIT ++#define PyVarObject_HEAD_INIT(type, size) PyObject_HEAD_INIT(type) size, ++#endif ++#ifndef Py_TYPE ++#define Py_TYPE(ob) (((PyObject*)(ob))->ob_type) ++#endif ++ ++ ++static google::protobuf::DescriptorPool* g_descriptor_pool = NULL; ++ ++namespace cfield_descriptor { ++ ++static void Dealloc(CFieldDescriptor* self) { ++ Py_CLEAR(self->descriptor_field); ++ Py_TYPE(self)->tp_free(reinterpret_cast(self)); ++} ++ ++static PyObject* GetFullName(CFieldDescriptor* self, void *closure) { ++ return PyString_FromStringAndSize( ++ self->descriptor->full_name().c_str(), ++ self->descriptor->full_name().size()); ++} ++ ++static PyObject* GetName(CFieldDescriptor *self, void *closure) { ++ return PyString_FromStringAndSize( ++ self->descriptor->name().c_str(), ++ self->descriptor->name().size()); ++} ++ ++static PyObject* 
GetCppType(CFieldDescriptor *self, void *closure) { ++ return PyInt_FromLong(self->descriptor->cpp_type()); ++} ++ ++static PyObject* GetLabel(CFieldDescriptor *self, void *closure) { ++ return PyInt_FromLong(self->descriptor->label()); ++} ++ ++static PyObject* GetID(CFieldDescriptor *self, void *closure) { ++ return PyLong_FromVoidPtr(self); ++} ++ ++static PyGetSetDef Getters[] = { ++ { C("full_name"), (getter)GetFullName, NULL, "Full name", NULL}, ++ { C("name"), (getter)GetName, NULL, "last name", NULL}, ++ { C("cpp_type"), (getter)GetCppType, NULL, "C++ Type", NULL}, ++ { C("label"), (getter)GetLabel, NULL, "Label", NULL}, ++ { C("id"), (getter)GetID, NULL, "ID", NULL}, ++ {NULL} ++}; ++ ++} // namespace cfield_descriptor ++ ++PyTypeObject CFieldDescriptor_Type = { ++ PyVarObject_HEAD_INIT(&PyType_Type, 0) ++ C("google.protobuf.internal." ++ "_net_proto2___python." ++ "CFieldDescriptor"), // tp_name ++ sizeof(CFieldDescriptor), // tp_basicsize ++ 0, // tp_itemsize ++ (destructor)cfield_descriptor::Dealloc, // tp_dealloc ++ 0, // tp_print ++ 0, // tp_getattr ++ 0, // tp_setattr ++ 0, // tp_compare ++ 0, // tp_repr ++ 0, // tp_as_number ++ 0, // tp_as_sequence ++ 0, // tp_as_mapping ++ 0, // tp_hash ++ 0, // tp_call ++ 0, // tp_str ++ 0, // tp_getattro ++ 0, // tp_setattro ++ 0, // tp_as_buffer ++ Py_TPFLAGS_DEFAULT, // tp_flags ++ C("A Field Descriptor"), // tp_doc ++ 0, // tp_traverse ++ 0, // tp_clear ++ 0, // tp_richcompare ++ 0, // tp_weaklistoffset ++ 0, // tp_iter ++ 0, // tp_iternext ++ 0, // tp_methods ++ 0, // tp_members ++ cfield_descriptor::Getters, // tp_getset ++ 0, // tp_base ++ 0, // tp_dict ++ 0, // tp_descr_get ++ 0, // tp_descr_set ++ 0, // tp_dictoffset ++ 0, // tp_init ++ PyType_GenericAlloc, // tp_alloc ++ PyType_GenericNew, // tp_new ++ PyObject_Del, // tp_free ++}; ++ ++namespace cdescriptor_pool { ++ ++static void Dealloc(CDescriptorPool* self) { ++ Py_TYPE(self)->tp_free(reinterpret_cast(self)); ++} ++ ++static PyObject* 
NewCDescriptor( ++ const google::protobuf::FieldDescriptor* field_descriptor) { ++ CFieldDescriptor* cfield_descriptor = PyObject_New( ++ CFieldDescriptor, &CFieldDescriptor_Type); ++ if (cfield_descriptor == NULL) { ++ return NULL; ++ } ++ cfield_descriptor->descriptor = field_descriptor; ++ cfield_descriptor->descriptor_field = NULL; ++ ++ return reinterpret_cast(cfield_descriptor); ++} ++ ++PyObject* FindFieldByName(CDescriptorPool* self, PyObject* name) { ++ const char* full_field_name = PyString_AsString(name); ++ if (full_field_name == NULL) { ++ return NULL; ++ } ++ ++ const google::protobuf::FieldDescriptor* field_descriptor = NULL; ++ ++ field_descriptor = self->pool->FindFieldByName(full_field_name); ++ ++ if (field_descriptor == NULL) { ++ PyErr_Format(PyExc_TypeError, "Couldn't find field %.200s", ++ full_field_name); ++ return NULL; ++ } ++ ++ return NewCDescriptor(field_descriptor); ++} ++ ++PyObject* FindExtensionByName(CDescriptorPool* self, PyObject* arg) { ++ const char* full_field_name = PyString_AsString(arg); ++ if (full_field_name == NULL) { ++ return NULL; ++ } ++ ++ const google::protobuf::FieldDescriptor* field_descriptor = ++ self->pool->FindExtensionByName(full_field_name); ++ if (field_descriptor == NULL) { ++ PyErr_Format(PyExc_TypeError, "Couldn't find field %.200s", ++ full_field_name); ++ return NULL; ++ } ++ ++ return NewCDescriptor(field_descriptor); ++} ++ ++static PyMethodDef Methods[] = { ++ { C("FindFieldByName"), ++ (PyCFunction)FindFieldByName, ++ METH_O, ++ C("Searches for a field descriptor by full name.") }, ++ { C("FindExtensionByName"), ++ (PyCFunction)FindExtensionByName, ++ METH_O, ++ C("Searches for extension descriptor by full name.") }, ++ {NULL} ++}; ++ ++} // namespace cdescriptor_pool ++ ++PyTypeObject CDescriptorPool_Type = { ++ PyVarObject_HEAD_INIT(&PyType_Type, 0) ++ C("google.protobuf.internal." ++ "_net_proto2___python." 
++ "CFieldDescriptor"), // tp_name ++ sizeof(CDescriptorPool), // tp_basicsize ++ 0, // tp_itemsize ++ (destructor)cdescriptor_pool::Dealloc, // tp_dealloc ++ 0, // tp_print ++ 0, // tp_getattr ++ 0, // tp_setattr ++ 0, // tp_compare ++ 0, // tp_repr ++ 0, // tp_as_number ++ 0, // tp_as_sequence ++ 0, // tp_as_mapping ++ 0, // tp_hash ++ 0, // tp_call ++ 0, // tp_str ++ 0, // tp_getattro ++ 0, // tp_setattro ++ 0, // tp_as_buffer ++ Py_TPFLAGS_DEFAULT, // tp_flags ++ C("A Descriptor Pool"), // tp_doc ++ 0, // tp_traverse ++ 0, // tp_clear ++ 0, // tp_richcompare ++ 0, // tp_weaklistoffset ++ 0, // tp_iter ++ 0, // tp_iternext ++ cdescriptor_pool::Methods, // tp_methods ++ 0, // tp_members ++ 0, // tp_getset ++ 0, // tp_base ++ 0, // tp_dict ++ 0, // tp_descr_get ++ 0, // tp_descr_set ++ 0, // tp_dictoffset ++ 0, // tp_init ++ PyType_GenericAlloc, // tp_alloc ++ PyType_GenericNew, // tp_new ++ PyObject_Del, // tp_free ++}; ++ ++google::protobuf::DescriptorPool* GetDescriptorPool() { ++ if (g_descriptor_pool == NULL) { ++ g_descriptor_pool = new google::protobuf::DescriptorPool( ++ google::protobuf::DescriptorPool::generated_pool()); ++ } ++ return g_descriptor_pool; ++} ++ ++PyObject* Python_NewCDescriptorPool(PyObject* ignored, PyObject* args) { ++ CDescriptorPool* cdescriptor_pool = PyObject_New( ++ CDescriptorPool, &CDescriptorPool_Type); ++ if (cdescriptor_pool == NULL) { ++ return NULL; ++ } ++ cdescriptor_pool->pool = GetDescriptorPool(); ++ return reinterpret_cast(cdescriptor_pool); ++} ++ ++ ++// Collects errors that occur during proto file building to allow them to be ++// propagated in the python exception instead of only living in ERROR logs. 
++class BuildFileErrorCollector : public google::protobuf::DescriptorPool::ErrorCollector { ++ public: ++ BuildFileErrorCollector() : error_message(""), had_errors(false) {} ++ ++ void AddError(const string& filename, const string& element_name, ++ const Message* descriptor, ErrorLocation location, ++ const string& message) { ++ // Replicates the logging behavior that happens in the C++ implementation ++ // when an error collector is not passed in. ++ if (!had_errors) { ++ error_message += ++ ("Invalid proto descriptor for file \"" + filename + "\":\n"); ++ } ++ // As this only happens on failure and will result in the program not ++ // running at all, no effort is made to optimize this string manipulation. ++ error_message += (" " + element_name + ": " + message + "\n"); ++ } ++ ++ string error_message; ++ bool had_errors; ++}; ++ ++PyObject* Python_BuildFile(PyObject* ignored, PyObject* arg) { ++ char* message_type; ++ Py_ssize_t message_len; ++ ++ if (PyBytes_AsStringAndSize(arg, &message_type, &message_len) < 0) { ++ return NULL; ++ } ++ ++ google::protobuf::FileDescriptorProto file_proto; ++ if (!file_proto.ParseFromArray(message_type, message_len)) { ++ PyErr_SetString(PyExc_TypeError, "Couldn't parse file content!"); ++ return NULL; ++ } ++ ++ if (google::protobuf::DescriptorPool::generated_pool()->FindFileByName( ++ file_proto.name()) != NULL) { ++ Py_RETURN_NONE; ++ } ++ ++ BuildFileErrorCollector error_collector; ++ const google::protobuf::FileDescriptor* descriptor = ++ GetDescriptorPool()->BuildFileCollectingErrors(file_proto, ++ &error_collector); ++ if (descriptor == NULL) { ++ PyErr_Format(PyExc_TypeError, ++ "Couldn't build proto file into descriptor pool!\n%s", ++ error_collector.error_message.c_str()); ++ return NULL; ++ } ++ ++ Py_RETURN_NONE; ++} ++ ++bool InitDescriptor() { ++ CFieldDescriptor_Type.tp_new = PyType_GenericNew; ++ if (PyType_Ready(&CFieldDescriptor_Type) < 0) ++ return false; ++ ++ CDescriptorPool_Type.tp_new = PyType_GenericNew; 
++ if (PyType_Ready(&CDescriptorPool_Type) < 0) ++ return false; ++ ++ return true; ++} ++ ++} // namespace python ++} // namespace protobuf ++} // namespace google +--- /dev/null ++++ protobuf-2.6.1/python3/google/protobuf/pyext/descriptor.h +@@ -0,0 +1,96 @@ ++// Protocol Buffers - Google's data interchange format ++// Copyright 2008 Google Inc. All rights reserved. ++// https://developers.google.com/protocol-buffers/ ++// ++// Redistribution and use in source and binary forms, with or without ++// modification, are permitted provided that the following conditions are ++// met: ++// ++// * Redistributions of source code must retain the above copyright ++// notice, this list of conditions and the following disclaimer. ++// * Redistributions in binary form must reproduce the above ++// copyright notice, this list of conditions and the following disclaimer ++// in the documentation and/or other materials provided with the ++// distribution. ++// * Neither the name of Google Inc. nor the names of its ++// contributors may be used to endorse or promote products derived from ++// this software without specific prior written permission. ++// ++// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS ++// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT ++// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR ++// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT ++// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, ++// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT ++// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, ++// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY ++// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT ++// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE ++// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
++ ++// Author: petar@google.com (Petar Petrov) ++ ++#ifndef GOOGLE_PROTOBUF_PYTHON_CPP_DESCRIPTOR_H__ ++#define GOOGLE_PROTOBUF_PYTHON_CPP_DESCRIPTOR_H__ ++ ++#include ++#include ++ ++#include ++ ++#if PY_VERSION_HEX < 0x02050000 && !defined(PY_SSIZE_T_MIN) ++typedef int Py_ssize_t; ++#define PY_SSIZE_T_MAX INT_MAX ++#define PY_SSIZE_T_MIN INT_MIN ++#endif ++ ++namespace google { ++namespace protobuf { ++namespace python { ++ ++typedef struct CFieldDescriptor { ++ PyObject_HEAD ++ ++ // The proto2 descriptor that this object represents. ++ const google::protobuf::FieldDescriptor* descriptor; ++ ++ // Reference to the original field object in the Python DESCRIPTOR. ++ PyObject* descriptor_field; ++} CFieldDescriptor; ++ ++typedef struct { ++ PyObject_HEAD ++ ++ const google::protobuf::DescriptorPool* pool; ++} CDescriptorPool; ++ ++extern PyTypeObject CFieldDescriptor_Type; ++ ++extern PyTypeObject CDescriptorPool_Type; ++ ++namespace cdescriptor_pool { ++ ++// Looks up a field by name. Returns a CDescriptor corresponding to ++// the field on success, or NULL on failure. ++// ++// Returns a new reference. ++PyObject* FindFieldByName(CDescriptorPool* self, PyObject* name); ++ ++// Looks up an extension by name. Returns a CDescriptor corresponding ++// to the field on success, or NULL on failure. ++// ++// Returns a new reference. ++PyObject* FindExtensionByName(CDescriptorPool* self, PyObject* arg); ++ ++} // namespace cdescriptor_pool ++ ++PyObject* Python_NewCDescriptorPool(PyObject* ignored, PyObject* args); ++PyObject* Python_BuildFile(PyObject* ignored, PyObject* args); ++bool InitDescriptor(); ++google::protobuf::DescriptorPool* GetDescriptorPool(); ++ ++} // namespace python ++} // namespace protobuf ++ ++} // namespace google ++#endif // GOOGLE_PROTOBUF_PYTHON_CPP_DESCRIPTOR_H__ +--- /dev/null ++++ protobuf-2.6.1/python3/google/protobuf/pyext/descriptor_cpp2_test.py +@@ -0,0 +1,58 @@ ++#! 
/usr/bin/python ++# ++# Protocol Buffers - Google's data interchange format ++# Copyright 2008 Google Inc. All rights reserved. ++# https://developers.google.com/protocol-buffers/ ++# ++# Redistribution and use in source and binary forms, with or without ++# modification, are permitted provided that the following conditions are ++# met: ++# ++# * Redistributions of source code must retain the above copyright ++# notice, this list of conditions and the following disclaimer. ++# * Redistributions in binary form must reproduce the above ++# copyright notice, this list of conditions and the following disclaimer ++# in the documentation and/or other materials provided with the ++# distribution. ++# * Neither the name of Google Inc. nor the names of its ++# contributors may be used to endorse or promote products derived from ++# this software without specific prior written permission. ++# ++# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS ++# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT ++# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR ++# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT ++# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, ++# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT ++# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, ++# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY ++# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT ++# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE ++# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
++ ++"""Tests for google.protobuf.pyext behavior.""" ++ ++__author__ = 'anuraag@google.com (Anuraag Agrawal)' ++ ++import os ++os.environ['PROTOCOL_BUFFERS_PYTHON_IMPLEMENTATION'] = 'cpp' ++os.environ['PROTOCOL_BUFFERS_PYTHON_IMPLEMENTATION_VERSION'] = '2' ++ ++# We must set the implementation version above before the google3 imports. ++# pylint: disable=g-import-not-at-top ++from google.apputils import basetest ++from google.protobuf.internal import api_implementation ++# Run all tests from the original module by putting them in our namespace. ++# pylint: disable=wildcard-import ++from google.protobuf.internal.descriptor_test import * ++ ++ ++class ConfirmCppApi2Test(basetest.TestCase): ++ ++ def testImplementationSetting(self): ++ self.assertEqual('cpp', api_implementation.Type()) ++ self.assertEqual(2, api_implementation.Version()) ++ ++ ++if __name__ == '__main__': ++ basetest.main() +--- /dev/null ++++ protobuf-2.6.1/python3/google/protobuf/pyext/extension_dict.cc +@@ -0,0 +1,338 @@ ++// Protocol Buffers - Google's data interchange format ++// Copyright 2008 Google Inc. All rights reserved. ++// https://developers.google.com/protocol-buffers/ ++// ++// Redistribution and use in source and binary forms, with or without ++// modification, are permitted provided that the following conditions are ++// met: ++// ++// * Redistributions of source code must retain the above copyright ++// notice, this list of conditions and the following disclaimer. ++// * Redistributions in binary form must reproduce the above ++// copyright notice, this list of conditions and the following disclaimer ++// in the documentation and/or other materials provided with the ++// distribution. ++// * Neither the name of Google Inc. nor the names of its ++// contributors may be used to endorse or promote products derived from ++// this software without specific prior written permission. 
++// ++// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS ++// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT ++// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR ++// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT ++// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, ++// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT ++// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, ++// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY ++// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT ++// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE ++// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. ++ ++// Author: anuraag@google.com (Anuraag Agrawal) ++// Author: tibell@google.com (Johan Tibell) ++ ++#include ++ ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++ ++namespace google { ++namespace protobuf { ++namespace python { ++ ++extern google::protobuf::DynamicMessageFactory* global_message_factory; ++ ++namespace extension_dict { ++ ++// TODO(tibell): Always use self->message for clarity, just like in ++// RepeatedCompositeContainer. 
++static google::protobuf::Message* GetMessage(ExtensionDict* self) { ++ if (self->parent != NULL) { ++ return self->parent->message; ++ } else { ++ return self->message; ++ } ++} ++ ++CFieldDescriptor* InternalGetCDescriptorFromExtension(PyObject* extension) { ++ PyObject* cdescriptor = PyObject_GetAttrString(extension, "_cdescriptor"); ++ if (cdescriptor == NULL) { ++ PyErr_SetString(PyExc_KeyError, "Unregistered extension."); ++ return NULL; ++ } ++ if (!PyObject_TypeCheck(cdescriptor, &CFieldDescriptor_Type)) { ++ PyErr_SetString(PyExc_TypeError, "Not a CFieldDescriptor"); ++ Py_DECREF(cdescriptor); ++ return NULL; ++ } ++ CFieldDescriptor* descriptor = ++ reinterpret_cast(cdescriptor); ++ return descriptor; ++} ++ ++PyObject* len(ExtensionDict* self) { ++#if PY_MAJOR_VERSION >= 3 ++ return PyLong_FromLong(PyDict_Size(self->values)); ++#else ++ return PyInt_FromLong(PyDict_Size(self->values)); ++#endif ++} ++ ++// TODO(tibell): Use VisitCompositeField. ++int ReleaseExtension(ExtensionDict* self, ++ PyObject* extension, ++ const google::protobuf::FieldDescriptor* descriptor) { ++ if (descriptor->label() == google::protobuf::FieldDescriptor::LABEL_REPEATED) { ++ if (descriptor->cpp_type() == ++ google::protobuf::FieldDescriptor::CPPTYPE_MESSAGE) { ++ if (repeated_composite_container::Release( ++ reinterpret_cast( ++ extension)) < 0) { ++ return -1; ++ } ++ } else { ++ if (repeated_scalar_container::Release( ++ reinterpret_cast( ++ extension)) < 0) { ++ return -1; ++ } ++ } ++ } else if (descriptor->cpp_type() == ++ google::protobuf::FieldDescriptor::CPPTYPE_MESSAGE) { ++ if (cmessage::ReleaseSubMessage( ++ GetMessage(self), descriptor, ++ reinterpret_cast(extension)) < 0) { ++ return -1; ++ } ++ } ++ ++ return 0; ++} ++ ++PyObject* subscript(ExtensionDict* self, PyObject* key) { ++ CFieldDescriptor* cdescriptor = InternalGetCDescriptorFromExtension( ++ key); ++ if (cdescriptor == NULL) { ++ return NULL; ++ } ++ ScopedPyObjectPtr 
py_cdescriptor(reinterpret_cast(cdescriptor)); ++ const google::protobuf::FieldDescriptor* descriptor = cdescriptor->descriptor; ++ if (descriptor == NULL) { ++ return NULL; ++ } ++ if (descriptor->label() != FieldDescriptor::LABEL_REPEATED && ++ descriptor->cpp_type() != FieldDescriptor::CPPTYPE_MESSAGE) { ++ return cmessage::InternalGetScalar(self->parent, descriptor); ++ } ++ ++ PyObject* value = PyDict_GetItem(self->values, key); ++ if (value != NULL) { ++ Py_INCREF(value); ++ return value; ++ } ++ ++ if (descriptor->label() != FieldDescriptor::LABEL_REPEATED && ++ descriptor->cpp_type() == FieldDescriptor::CPPTYPE_MESSAGE) { ++ PyObject* sub_message = cmessage::InternalGetSubMessage( ++ self->parent, cdescriptor); ++ if (sub_message == NULL) { ++ return NULL; ++ } ++ PyDict_SetItem(self->values, key, sub_message); ++ return sub_message; ++ } ++ ++ if (descriptor->label() == FieldDescriptor::LABEL_REPEATED) { ++ if (descriptor->cpp_type() == FieldDescriptor::CPPTYPE_MESSAGE) { ++ // COPIED ++ PyObject* py_container = PyObject_CallObject( ++ reinterpret_cast(&RepeatedCompositeContainer_Type), ++ NULL); ++ if (py_container == NULL) { ++ return NULL; ++ } ++ RepeatedCompositeContainer* container = ++ reinterpret_cast(py_container); ++ PyObject* field = cdescriptor->descriptor_field; ++ PyObject* message_type = PyObject_GetAttrString(field, "message_type"); ++ PyObject* concrete_class = PyObject_GetAttrString(message_type, ++ "_concrete_class"); ++ container->owner = self->owner; ++ container->parent = self->parent; ++ container->message = self->parent->message; ++ container->parent_field = cdescriptor; ++ container->subclass_init = concrete_class; ++ Py_DECREF(message_type); ++ PyDict_SetItem(self->values, key, py_container); ++ return py_container; ++ } else { ++ // COPIED ++ ScopedPyObjectPtr init_args(PyTuple_Pack(2, self->parent, cdescriptor)); ++ PyObject* py_container = PyObject_CallObject( ++ reinterpret_cast(&RepeatedScalarContainer_Type), ++ init_args); 
++ if (py_container == NULL) { ++ return NULL; ++ } ++ PyDict_SetItem(self->values, key, py_container); ++ return py_container; ++ } ++ } ++ PyErr_SetString(PyExc_ValueError, "control reached unexpected line"); ++ return NULL; ++} ++ ++int ass_subscript(ExtensionDict* self, PyObject* key, PyObject* value) { ++ CFieldDescriptor* cdescriptor = InternalGetCDescriptorFromExtension( ++ key); ++ if (cdescriptor == NULL) { ++ return -1; ++ } ++ ScopedPyObjectPtr py_cdescriptor(reinterpret_cast(cdescriptor)); ++ const google::protobuf::FieldDescriptor* descriptor = cdescriptor->descriptor; ++ if (descriptor->label() != FieldDescriptor::LABEL_OPTIONAL || ++ descriptor->cpp_type() == FieldDescriptor::CPPTYPE_MESSAGE) { ++ PyErr_SetString(PyExc_TypeError, "Extension is repeated and/or composite " ++ "type"); ++ return -1; ++ } ++ cmessage::AssureWritable(self->parent); ++ if (cmessage::InternalSetScalar(self->parent, descriptor, value) < 0) { ++ return -1; ++ } ++ // TODO(tibell): We shouldn't write scalars to the cache. 
++ PyDict_SetItem(self->values, key, value); ++ return 0; ++} ++ ++PyObject* ClearExtension(ExtensionDict* self, PyObject* extension) { ++ CFieldDescriptor* cdescriptor = InternalGetCDescriptorFromExtension( ++ extension); ++ if (cdescriptor == NULL) { ++ return NULL; ++ } ++ ScopedPyObjectPtr py_cdescriptor(reinterpret_cast(cdescriptor)); ++ PyObject* value = PyDict_GetItem(self->values, extension); ++ if (value != NULL) { ++ if (ReleaseExtension(self, value, cdescriptor->descriptor) < 0) { ++ return NULL; ++ } ++ } ++ if (cmessage::ClearFieldByDescriptor(self->parent, ++ cdescriptor->descriptor) == NULL) { ++ return NULL; ++ } ++ if (PyDict_DelItem(self->values, extension) < 0) { ++ PyErr_Clear(); ++ } ++ Py_RETURN_NONE; ++} ++ ++PyObject* HasExtension(ExtensionDict* self, PyObject* extension) { ++ CFieldDescriptor* cdescriptor = InternalGetCDescriptorFromExtension( ++ extension); ++ if (cdescriptor == NULL) { ++ return NULL; ++ } ++ ScopedPyObjectPtr py_cdescriptor(reinterpret_cast(cdescriptor)); ++ PyObject* result = cmessage::HasFieldByDescriptor( ++ self->parent, cdescriptor->descriptor); ++ return result; ++} ++ ++PyObject* _FindExtensionByName(ExtensionDict* self, PyObject* name) { ++ ScopedPyObjectPtr extensions_by_name(PyObject_GetAttrString( ++ reinterpret_cast(self->parent), "_extensions_by_name")); ++ if (extensions_by_name == NULL) { ++ return NULL; ++ } ++ PyObject* result = PyDict_GetItem(extensions_by_name, name); ++ if (result == NULL) { ++ Py_RETURN_NONE; ++ } else { ++ Py_INCREF(result); ++ return result; ++ } ++} ++ ++int init(ExtensionDict* self, PyObject* args, PyObject* kwargs) { ++ self->parent = NULL; ++ self->message = NULL; ++ self->values = PyDict_New(); ++ return 0; ++} ++ ++void dealloc(ExtensionDict* self) { ++ Py_CLEAR(self->values); ++ self->owner.reset(); ++ Py_TYPE(self)->tp_free(reinterpret_cast(self)); ++} ++ ++static PyMappingMethods MpMethods = { ++ (lenfunc)len, /* mp_length */ ++ (binaryfunc)subscript, /* mp_subscript */ ++ 
(objobjargproc)ass_subscript,/* mp_ass_subscript */ ++}; ++ ++#define EDMETHOD(name, args, doc) { #name, (PyCFunction)name, args, doc } ++static PyMethodDef Methods[] = { ++ EDMETHOD(ClearExtension, METH_O, "Clears an extension from the object."), ++ EDMETHOD(HasExtension, METH_O, "Checks if the object has an extension."), ++ EDMETHOD(_FindExtensionByName, METH_O, ++ "Finds an extension by name."), ++ { NULL, NULL } ++}; ++ ++} // namespace extension_dict ++ ++PyTypeObject ExtensionDict_Type = { ++ PyVarObject_HEAD_INIT(&PyType_Type, 0) ++ "google.protobuf.internal." ++ "cpp._message.ExtensionDict", // tp_name ++ sizeof(ExtensionDict), // tp_basicsize ++ 0, // tp_itemsize ++ (destructor)extension_dict::dealloc, // tp_dealloc ++ 0, // tp_print ++ 0, // tp_getattr ++ 0, // tp_setattr ++ 0, // tp_compare ++ 0, // tp_repr ++ 0, // tp_as_number ++ 0, // tp_as_sequence ++ &extension_dict::MpMethods, // tp_as_mapping ++ 0, // tp_hash ++ 0, // tp_call ++ 0, // tp_str ++ 0, // tp_getattro ++ 0, // tp_setattro ++ 0, // tp_as_buffer ++ Py_TPFLAGS_DEFAULT, // tp_flags ++ "An extension dict", // tp_doc ++ 0, // tp_traverse ++ 0, // tp_clear ++ 0, // tp_richcompare ++ 0, // tp_weaklistoffset ++ 0, // tp_iter ++ 0, // tp_iternext ++ extension_dict::Methods, // tp_methods ++ 0, // tp_members ++ 0, // tp_getset ++ 0, // tp_base ++ 0, // tp_dict ++ 0, // tp_descr_get ++ 0, // tp_descr_set ++ 0, // tp_dictoffset ++ (initproc)extension_dict::init, // tp_init ++}; ++ ++} // namespace python ++} // namespace protobuf ++} // namespace google +--- /dev/null ++++ protobuf-2.6.1/python3/google/protobuf/pyext/extension_dict.h +@@ -0,0 +1,123 @@ ++// Protocol Buffers - Google's data interchange format ++// Copyright 2008 Google Inc. All rights reserved. 
++// https://developers.google.com/protocol-buffers/ ++// ++// Redistribution and use in source and binary forms, with or without ++// modification, are permitted provided that the following conditions are ++// met: ++// ++// * Redistributions of source code must retain the above copyright ++// notice, this list of conditions and the following disclaimer. ++// * Redistributions in binary form must reproduce the above ++// copyright notice, this list of conditions and the following disclaimer ++// in the documentation and/or other materials provided with the ++// distribution. ++// * Neither the name of Google Inc. nor the names of its ++// contributors may be used to endorse or promote products derived from ++// this software without specific prior written permission. ++// ++// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS ++// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT ++// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR ++// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT ++// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, ++// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT ++// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, ++// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY ++// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT ++// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE ++// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
++ ++// Author: anuraag@google.com (Anuraag Agrawal) ++// Author: tibell@google.com (Johan Tibell) ++ ++#ifndef GOOGLE_PROTOBUF_PYTHON_CPP_EXTENSION_DICT_H__ ++#define GOOGLE_PROTOBUF_PYTHON_CPP_EXTENSION_DICT_H__ ++ ++#include ++ ++#include ++#ifndef _SHARED_PTR_H ++#include ++#endif ++ ++ ++namespace google { ++namespace protobuf { ++ ++class Message; ++class FieldDescriptor; ++ ++using internal::shared_ptr; ++ ++namespace python { ++ ++struct CMessage; ++struct CFieldDescriptor; ++ ++typedef struct ExtensionDict { ++ PyObject_HEAD; ++ shared_ptr owner; ++ CMessage* parent; ++ Message* message; ++ PyObject* values; ++} ExtensionDict; ++ ++extern PyTypeObject ExtensionDict_Type; ++ ++namespace extension_dict { ++ ++// Gets the _cdescriptor reference to a CFieldDescriptor object given a ++// python descriptor object. ++// ++// Returns a new reference. ++CFieldDescriptor* InternalGetCDescriptorFromExtension(PyObject* extension); ++ ++// Gets the number of extension values in this ExtensionDict as a python object. ++// ++// Returns a new reference. ++PyObject* len(ExtensionDict* self); ++ ++// Releases extensions referenced outside this dictionary to keep outside ++// references alive. ++// ++// Returns 0 on success, -1 on failure. ++int ReleaseExtension(ExtensionDict* self, ++ PyObject* extension, ++ const google::protobuf::FieldDescriptor* descriptor); ++ ++// Gets an extension from the dict for the given extension descriptor. ++// ++// Returns a new reference. ++PyObject* subscript(ExtensionDict* self, PyObject* key); ++ ++// Assigns a value to an extension in the dict. Can only be used for singular ++// simple types. ++// ++// Returns 0 on success, -1 on failure. ++int ass_subscript(ExtensionDict* self, PyObject* key, PyObject* value); ++ ++// Clears an extension from the dict. Will release the extension if there ++// is still an external reference left to it. ++// ++// Returns None on success. 
++PyObject* ClearExtension(ExtensionDict* self, ++ PyObject* extension); ++ ++// Checks if the dict has an extension. ++// ++// Returns a new python boolean reference. ++PyObject* HasExtension(ExtensionDict* self, PyObject* extension); ++ ++// Gets an extension from the dict given the extension name as opposed to ++// descriptor. ++// ++// Returns a new reference. ++PyObject* _FindExtensionByName(ExtensionDict* self, PyObject* name); ++ ++} // namespace extension_dict ++} // namespace python ++} // namespace protobuf ++ ++} // namespace google ++#endif // GOOGLE_PROTOBUF_PYTHON_CPP_EXTENSION_DICT_H__ +--- /dev/null ++++ protobuf-2.6.1/python3/google/protobuf/pyext/message.cc +@@ -0,0 +1,2561 @@ ++// Protocol Buffers - Google's data interchange format ++// Copyright 2008 Google Inc. All rights reserved. ++// https://developers.google.com/protocol-buffers/ ++// ++// Redistribution and use in source and binary forms, with or without ++// modification, are permitted provided that the following conditions are ++// met: ++// ++// * Redistributions of source code must retain the above copyright ++// notice, this list of conditions and the following disclaimer. ++// * Redistributions in binary form must reproduce the above ++// copyright notice, this list of conditions and the following disclaimer ++// in the documentation and/or other materials provided with the ++// distribution. ++// * Neither the name of Google Inc. nor the names of its ++// contributors may be used to endorse or promote products derived from ++// this software without specific prior written permission. ++// ++// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS ++// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT ++// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR ++// A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT ++// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, ++// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT ++// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, ++// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY ++// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT ++// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE ++// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. ++ ++// Author: anuraag@google.com (Anuraag Agrawal) ++// Author: tibell@google.com (Johan Tibell) ++ ++#include ++ ++#include ++#ifndef _SHARED_PTR_H ++#include ++#endif ++#include ++#include ++ ++#ifndef PyVarObject_HEAD_INIT ++#define PyVarObject_HEAD_INIT(type, size) PyObject_HEAD_INIT(type) size, ++#endif ++#ifndef Py_TYPE ++#define Py_TYPE(ob) (((PyObject*)(ob))->ob_type) ++#endif ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++ ++#if PY_MAJOR_VERSION >= 3 ++ #define PyInt_Check PyLong_Check ++ #define PyInt_AsLong PyLong_AsLong ++ #define PyInt_FromLong PyLong_FromLong ++ #define PyInt_FromSize_t PyLong_FromSize_t ++ #define PyString_Check PyUnicode_Check ++ #define PyString_FromString PyUnicode_FromString ++ #define PyString_FromStringAndSize PyUnicode_FromStringAndSize ++ #if PY_VERSION_HEX < 0x03030000 ++ #error "Python 3.0 - 3.2 are not supported." ++ #else ++ #define PyString_AsString(ob) \ ++ (PyUnicode_Check(ob)? 
PyUnicode_AsUTF8(ob): PyBytes_AS_STRING(ob)) ++ #endif ++#endif ++ ++namespace google { ++namespace protobuf { ++namespace python { ++ ++// Forward declarations ++namespace cmessage { ++static PyObject* GetDescriptor(CMessage* self, PyObject* name); ++static string GetMessageName(CMessage* self); ++int InternalReleaseFieldByDescriptor( ++ const google::protobuf::FieldDescriptor* field_descriptor, ++ PyObject* composite_field, ++ google::protobuf::Message* parent_message); ++} // namespace cmessage ++ ++// --------------------------------------------------------------------- ++// Visiting the composite children of a CMessage ++ ++struct ChildVisitor { ++ // Returns 0 on success, -1 on failure. ++ int VisitRepeatedCompositeContainer(RepeatedCompositeContainer* container) { ++ return 0; ++ } ++ ++ // Returns 0 on success, -1 on failure. ++ int VisitRepeatedScalarContainer(RepeatedScalarContainer* container) { ++ return 0; ++ } ++ ++ // Returns 0 on success, -1 on failure. ++ int VisitCMessage(CMessage* cmessage, ++ const google::protobuf::FieldDescriptor* field_descriptor) { ++ return 0; ++ } ++}; ++ ++// Apply a function to a composite field. Does nothing if child is of ++// non-composite type. 
++template ++static int VisitCompositeField(const FieldDescriptor* descriptor, ++ PyObject* child, ++ Visitor visitor) { ++ if (descriptor->label() == google::protobuf::FieldDescriptor::LABEL_REPEATED) { ++ if (descriptor->cpp_type() == google::protobuf::FieldDescriptor::CPPTYPE_MESSAGE) { ++ RepeatedCompositeContainer* container = ++ reinterpret_cast(child); ++ if (visitor.VisitRepeatedCompositeContainer(container) == -1) ++ return -1; ++ } else { ++ RepeatedScalarContainer* container = ++ reinterpret_cast(child); ++ if (visitor.VisitRepeatedScalarContainer(container) == -1) ++ return -1; ++ } ++ } else if (descriptor->cpp_type() == ++ google::protobuf::FieldDescriptor::CPPTYPE_MESSAGE) { ++ CMessage* cmsg = reinterpret_cast(child); ++ if (visitor.VisitCMessage(cmsg, descriptor) == -1) ++ return -1; ++ } ++ // The ExtensionDict might contain non-composite fields, which we ++ // skip here. ++ return 0; ++} ++ ++// Visit each composite field and extension field of this CMessage. ++// Returns -1 on error and 0 on success. ++template ++int ForEachCompositeField(CMessage* self, Visitor visitor) { ++ Py_ssize_t pos = 0; ++ PyObject* key; ++ PyObject* field; ++ ++ // Visit normal fields. ++ while (PyDict_Next(self->composite_fields, &pos, &key, &field)) { ++ PyObject* cdescriptor = cmessage::GetDescriptor(self, key); ++ if (cdescriptor != NULL) { ++ const google::protobuf::FieldDescriptor* descriptor = ++ reinterpret_cast(cdescriptor)->descriptor; ++ if (VisitCompositeField(descriptor, field, visitor) == -1) ++ return -1; ++ } ++ } ++ ++ // Visit extension fields. 
++ if (self->extensions != NULL) { ++ while (PyDict_Next(self->extensions->values, &pos, &key, &field)) { ++ CFieldDescriptor* cdescriptor = ++ extension_dict::InternalGetCDescriptorFromExtension(key); ++ if (cdescriptor == NULL) ++ return -1; ++ if (VisitCompositeField(cdescriptor->descriptor, field, visitor) == -1) ++ return -1; ++ } ++ } ++ ++ return 0; ++} ++ ++// --------------------------------------------------------------------- ++ ++// Constants used for integer type range checking. ++PyObject* kPythonZero; ++PyObject* kint32min_py; ++PyObject* kint32max_py; ++PyObject* kuint32max_py; ++PyObject* kint64min_py; ++PyObject* kint64max_py; ++PyObject* kuint64max_py; ++ ++PyObject* EnumTypeWrapper_class; ++PyObject* EncodeError_class; ++PyObject* DecodeError_class; ++PyObject* PickleError_class; ++ ++// Constant PyString values used for GetAttr/GetItem. ++static PyObject* kDESCRIPTOR; ++static PyObject* k__descriptors; ++static PyObject* kfull_name; ++static PyObject* kname; ++static PyObject* kmessage_type; ++static PyObject* kis_extendable; ++static PyObject* kextensions_by_name; ++static PyObject* k_extensions_by_name; ++static PyObject* k_extensions_by_number; ++static PyObject* k_concrete_class; ++static PyObject* kfields_by_name; ++ ++static CDescriptorPool* descriptor_pool; ++ ++/* Is 64bit */ ++void FormatTypeError(PyObject* arg, char* expected_types) { ++ PyObject* repr = PyObject_Repr(arg); ++ if (repr) { ++ PyErr_Format(PyExc_TypeError, ++ "%.100s has type %.100s, but expected one of: %s", ++ PyString_AsString(repr), ++ Py_TYPE(arg)->tp_name, ++ expected_types); ++ Py_DECREF(repr); ++ } ++} ++ ++template ++bool CheckAndGetInteger( ++ PyObject* arg, T* value, PyObject* min, PyObject* max) { ++ bool is_long = PyLong_Check(arg); ++#if PY_MAJOR_VERSION < 3 ++ if (!PyInt_Check(arg) && !is_long) { ++ FormatTypeError(arg, "int, long"); ++ return false; ++ } ++ if (PyObject_Compare(min, arg) > 0 || PyObject_Compare(max, arg) < 0) { ++#else ++ if (!is_long) { 
++ FormatTypeError(arg, "int"); ++ return false; ++ } ++ if (PyObject_RichCompareBool(min, arg, Py_LE) != 1 || ++ PyObject_RichCompareBool(max, arg, Py_GE) != 1) { ++#endif ++ PyObject *s = PyObject_Str(arg); ++ if (s) { ++ PyErr_Format(PyExc_ValueError, ++ "Value out of range: %s", ++ PyString_AsString(s)); ++ Py_DECREF(s); ++ } ++ return false; ++ } ++#if PY_MAJOR_VERSION < 3 ++ if (!is_long) { ++ *value = static_cast(PyInt_AsLong(arg)); ++ } else // NOLINT ++#endif ++ { ++ if (min == kPythonZero) { ++ *value = static_cast(PyLong_AsUnsignedLongLong(arg)); ++ } else { ++ *value = static_cast(PyLong_AsLongLong(arg)); ++ } ++ } ++ return true; ++} ++ ++// These are referenced by repeated_scalar_container, and must ++// be explicitly instantiated. ++template bool CheckAndGetInteger( ++ PyObject*, int32*, PyObject*, PyObject*); ++template bool CheckAndGetInteger( ++ PyObject*, int64*, PyObject*, PyObject*); ++template bool CheckAndGetInteger( ++ PyObject*, uint32*, PyObject*, PyObject*); ++template bool CheckAndGetInteger( ++ PyObject*, uint64*, PyObject*, PyObject*); ++ ++bool CheckAndGetDouble(PyObject* arg, double* value) { ++ if (!PyInt_Check(arg) && !PyLong_Check(arg) && ++ !PyFloat_Check(arg)) { ++ FormatTypeError(arg, "int, long, float"); ++ return false; ++ } ++ *value = PyFloat_AsDouble(arg); ++ return true; ++} ++ ++bool CheckAndGetFloat(PyObject* arg, float* value) { ++ double double_value; ++ if (!CheckAndGetDouble(arg, &double_value)) { ++ return false; ++ } ++ *value = static_cast(double_value); ++ return true; ++} ++ ++bool CheckAndGetBool(PyObject* arg, bool* value) { ++ if (!PyInt_Check(arg) && !PyBool_Check(arg) && !PyLong_Check(arg)) { ++ FormatTypeError(arg, "int, long, bool"); ++ return false; ++ } ++ *value = static_cast(PyInt_AsLong(arg)); ++ return true; ++} ++ ++bool CheckAndSetString( ++ PyObject* arg, google::protobuf::Message* message, ++ const google::protobuf::FieldDescriptor* descriptor, ++ const google::protobuf::Reflection* reflection, 
++ bool append, ++ int index) { ++ GOOGLE_DCHECK(descriptor->type() == google::protobuf::FieldDescriptor::TYPE_STRING || ++ descriptor->type() == google::protobuf::FieldDescriptor::TYPE_BYTES); ++ if (descriptor->type() == google::protobuf::FieldDescriptor::TYPE_STRING) { ++ if (!PyBytes_Check(arg) && !PyUnicode_Check(arg)) { ++ FormatTypeError(arg, "bytes, unicode"); ++ return false; ++ } ++ ++ if (PyBytes_Check(arg)) { ++ PyObject* unicode = PyUnicode_FromEncodedObject(arg, "ascii", NULL); ++ if (unicode == NULL) { ++ PyObject* repr = PyObject_Repr(arg); ++ PyErr_Format(PyExc_ValueError, ++ "%s has type str, but isn't in 7-bit ASCII " ++ "encoding. Non-ASCII strings must be converted to " ++ "unicode objects before being added.", ++ PyString_AsString(repr)); ++ Py_DECREF(repr); ++ return false; ++ } else { ++ Py_DECREF(unicode); ++ } ++ } ++ } else if (!PyBytes_Check(arg)) { ++ FormatTypeError(arg, "bytes"); ++ return false; ++ } ++ ++ PyObject* encoded_string = NULL; ++ if (descriptor->type() == google::protobuf::FieldDescriptor::TYPE_STRING) { ++ if (PyBytes_Check(arg)) { ++#if PY_MAJOR_VERSION < 3 ++ encoded_string = PyString_AsEncodedObject(arg, "utf-8", NULL); ++#else ++ encoded_string = arg; // Already encoded. ++ Py_INCREF(encoded_string); ++#endif ++ } else { ++ encoded_string = PyUnicode_AsEncodedObject(arg, "utf-8", NULL); ++ } ++ } else { ++ // In this case field type is "bytes". 
++ encoded_string = arg; ++ Py_INCREF(encoded_string); ++ } ++ ++ if (encoded_string == NULL) { ++ return false; ++ } ++ ++ char* value; ++ Py_ssize_t value_len; ++ if (PyBytes_AsStringAndSize(encoded_string, &value, &value_len) < 0) { ++ Py_DECREF(encoded_string); ++ return false; ++ } ++ ++ string value_string(value, value_len); ++ if (append) { ++ reflection->AddString(message, descriptor, value_string); ++ } else if (index < 0) { ++ reflection->SetString(message, descriptor, value_string); ++ } else { ++ reflection->SetRepeatedString(message, descriptor, index, value_string); ++ } ++ Py_DECREF(encoded_string); ++ return true; ++} ++ ++PyObject* ToStringObject( ++ const google::protobuf::FieldDescriptor* descriptor, string value) { ++ if (descriptor->type() != google::protobuf::FieldDescriptor::TYPE_STRING) { ++ return PyBytes_FromStringAndSize(value.c_str(), value.length()); ++ } ++ ++ PyObject* result = PyUnicode_DecodeUTF8(value.c_str(), value.length(), NULL); ++ // If the string can't be decoded in UTF-8, just return a string object that ++ // contains the raw bytes. This can't happen if the value was assigned using ++ // the members of the Python message object, but can happen if the values were ++ // parsed from the wire (binary). ++ if (result == NULL) { ++ PyErr_Clear(); ++ result = PyBytes_FromStringAndSize(value.c_str(), value.length()); ++ } ++ return result; ++} ++ ++google::protobuf::DynamicMessageFactory* global_message_factory; ++ ++namespace cmessage { ++ ++static int MaybeReleaseOverlappingOneofField( ++ CMessage* cmessage, ++ const google::protobuf::FieldDescriptor* field) { ++#ifdef GOOGLE_PROTOBUF_HAS_ONEOF ++ google::protobuf::Message* message = cmessage->message; ++ const google::protobuf::Reflection* reflection = message->GetReflection(); ++ if (!field->containing_oneof() || ++ !reflection->HasOneof(*message, field->containing_oneof()) || ++ reflection->HasField(*message, field)) { ++ // No other field in this oneof, no need to release. 
++ return 0; ++ } ++ ++ const OneofDescriptor* oneof = field->containing_oneof(); ++ const FieldDescriptor* existing_field = ++ reflection->GetOneofFieldDescriptor(*message, oneof); ++ if (existing_field->cpp_type() != google::protobuf::FieldDescriptor::CPPTYPE_MESSAGE) { ++ // Non-message fields don't need to be released. ++ return 0; ++ } ++ const char* field_name = existing_field->name().c_str(); ++ PyObject* child_message = PyDict_GetItemString( ++ cmessage->composite_fields, field_name); ++ if (child_message == NULL) { ++ // No python reference to this field so no need to release. ++ return 0; ++ } ++ ++ if (InternalReleaseFieldByDescriptor( ++ existing_field, child_message, message) < 0) { ++ return -1; ++ } ++ return PyDict_DelItemString(cmessage->composite_fields, field_name); ++#else ++ return 0; ++#endif ++} ++ ++// --------------------------------------------------------------------- ++// Making a message writable ++ ++static google::protobuf::Message* GetMutableMessage( ++ CMessage* parent, ++ const google::protobuf::FieldDescriptor* parent_field) { ++ google::protobuf::Message* parent_message = parent->message; ++ const google::protobuf::Reflection* reflection = parent_message->GetReflection(); ++ if (MaybeReleaseOverlappingOneofField(parent, parent_field) < 0) { ++ return NULL; ++ } ++ return reflection->MutableMessage( ++ parent_message, parent_field, global_message_factory); ++} ++ ++struct FixupMessageReference : public ChildVisitor { ++ // message must outlive this object. 
++ explicit FixupMessageReference(google::protobuf::Message* message) : ++ message_(message) {} ++ ++ int VisitRepeatedCompositeContainer(RepeatedCompositeContainer* container) { ++ container->message = message_; ++ return 0; ++ } ++ ++ int VisitRepeatedScalarContainer(RepeatedScalarContainer* container) { ++ container->message = message_; ++ return 0; ++ } ++ ++ private: ++ google::protobuf::Message* message_; ++}; ++ ++int AssureWritable(CMessage* self) { ++ if (self == NULL || !self->read_only) { ++ return 0; ++ } ++ ++ if (self->parent == NULL) { ++ // If parent is NULL but we are trying to modify a read-only message, this ++ // is a reference to a constant default instance that needs to be replaced ++ // with a mutable top-level message. ++ const Message* prototype = global_message_factory->GetPrototype( ++ self->message->GetDescriptor()); ++ self->message = prototype->New(); ++ self->owner.reset(self->message); ++ } else { ++ // Otherwise, we need a mutable child message. ++ if (AssureWritable(self->parent) == -1) ++ return -1; ++ ++ // Make self->message writable. ++ google::protobuf::Message* parent_message = self->parent->message; ++ google::protobuf::Message* mutable_message = GetMutableMessage( ++ self->parent, ++ self->parent_field->descriptor); ++ if (mutable_message == NULL) { ++ return -1; ++ } ++ self->message = mutable_message; ++ } ++ self->read_only = false; ++ ++ // When a CMessage is made writable its Message pointer is updated ++ // to point to a new mutable Message. When that happens we need to ++ // update any references to the old, read-only CMessage. There are ++ // three places such references occur: RepeatedScalarContainer, ++ // RepeatedCompositeContainer, and ExtensionDict. 
++ if (self->extensions != NULL) ++ self->extensions->message = self->message; ++ if (ForEachCompositeField(self, FixupMessageReference(self->message)) == -1) ++ return -1; ++ ++ return 0; ++} ++ ++// --- Globals: ++ ++static PyObject* GetDescriptor(CMessage* self, PyObject* name) { ++ PyObject* descriptors = ++ PyDict_GetItem(Py_TYPE(self)->tp_dict, k__descriptors); ++ if (descriptors == NULL) { ++ PyErr_SetString(PyExc_TypeError, "No __descriptors"); ++ return NULL; ++ } ++ ++ return PyDict_GetItem(descriptors, name); ++} ++ ++static const google::protobuf::Message* CreateMessage(const char* message_type) { ++ string message_name(message_type); ++ const google::protobuf::Descriptor* descriptor = ++ GetDescriptorPool()->FindMessageTypeByName(message_name); ++ if (descriptor == NULL) { ++ PyErr_SetString(PyExc_TypeError, message_type); ++ return NULL; ++ } ++ return global_message_factory->GetPrototype(descriptor); ++} ++ ++// If cmessage_list is not NULL, this function releases values into the ++// container CMessages instead of just removing. Repeated composite container ++// needs to do this to make sure CMessages stay alive if they're still ++// referenced after deletion. Repeated scalar container doesn't need to worry. ++int InternalDeleteRepeatedField( ++ google::protobuf::Message* message, ++ const google::protobuf::FieldDescriptor* field_descriptor, ++ PyObject* slice, ++ PyObject* cmessage_list) { ++ Py_ssize_t length, from, to, step, slice_length; ++ const google::protobuf::Reflection* reflection = message->GetReflection(); ++ int min, max; ++ length = reflection->FieldSize(*message, field_descriptor); ++ ++ if (PyInt_Check(slice) || PyLong_Check(slice)) { ++ from = to = PyLong_AsLong(slice); ++ if (from < 0) { ++ from = to = length + from; ++ } ++ step = 1; ++ min = max = from; ++ ++ // Range check. 
++ if (from < 0 || from >= length) { ++ PyErr_Format(PyExc_IndexError, "list assignment index out of range"); ++ return -1; ++ } ++ } else if (PySlice_Check(slice)) { ++ from = to = step = slice_length = 0; ++ PySlice_GetIndicesEx( ++#if PY_MAJOR_VERSION < 3 ++ reinterpret_cast(slice), ++#else ++ slice, ++#endif ++ length, &from, &to, &step, &slice_length); ++ if (from < to) { ++ min = from; ++ max = to - 1; ++ } else { ++ min = to + 1; ++ max = from; ++ } ++ } else { ++ PyErr_SetString(PyExc_TypeError, "list indices must be integers"); ++ return -1; ++ } ++ ++ Py_ssize_t i = from; ++ std::vector to_delete(length, false); ++ while (i >= min && i <= max) { ++ to_delete[i] = true; ++ i += step; ++ } ++ ++ to = 0; ++ for (i = 0; i < length; ++i) { ++ if (!to_delete[i]) { ++ if (i != to) { ++ reflection->SwapElements(message, field_descriptor, i, to); ++ if (cmessage_list != NULL) { ++ // If a list of cmessages is passed in (i.e. from a repeated ++ // composite container), swap those as well to correspond to the ++ // swaps in the underlying message so they're in the right order ++ // when we start releasing. 
++ PyObject* tmp = PyList_GET_ITEM(cmessage_list, i); ++ PyList_SET_ITEM(cmessage_list, i, ++ PyList_GET_ITEM(cmessage_list, to)); ++ PyList_SET_ITEM(cmessage_list, to, tmp); ++ } ++ } ++ ++to; ++ } ++ } ++ ++ while (i > to) { ++ if (cmessage_list == NULL) { ++ reflection->RemoveLast(message, field_descriptor); ++ } else { ++ CMessage* last_cmessage = reinterpret_cast( ++ PyList_GET_ITEM(cmessage_list, PyList_GET_SIZE(cmessage_list) - 1)); ++ repeated_composite_container::ReleaseLastTo( ++ field_descriptor, message, last_cmessage); ++ if (PySequence_DelItem(cmessage_list, -1) < 0) { ++ return -1; ++ } ++ } ++ --i; ++ } ++ ++ return 0; ++} ++ ++int InitAttributes(CMessage* self, PyObject* arg, PyObject* kwargs) { ++ ScopedPyObjectPtr descriptor; ++ if (arg == NULL) { ++ descriptor.reset( ++ PyObject_GetAttr(reinterpret_cast(self), kDESCRIPTOR)); ++ if (descriptor == NULL) { ++ return NULL; ++ } ++ } else { ++ descriptor.reset(arg); ++ descriptor.inc(); ++ } ++ ScopedPyObjectPtr is_extendable(PyObject_GetAttr(descriptor, kis_extendable)); ++ if (is_extendable == NULL) { ++ return NULL; ++ } ++ int retcode = PyObject_IsTrue(is_extendable); ++ if (retcode == -1) { ++ return NULL; ++ } ++ if (retcode) { ++ PyObject* py_extension_dict = PyObject_CallObject( ++ reinterpret_cast(&ExtensionDict_Type), NULL); ++ if (py_extension_dict == NULL) { ++ return NULL; ++ } ++ ExtensionDict* extension_dict = reinterpret_cast( ++ py_extension_dict); ++ extension_dict->parent = self; ++ extension_dict->message = self->message; ++ self->extensions = extension_dict; ++ } ++ ++ if (kwargs == NULL) { ++ return 0; ++ } ++ ++ Py_ssize_t pos = 0; ++ PyObject* name; ++ PyObject* value; ++ while (PyDict_Next(kwargs, &pos, &name, &value)) { ++ if (!PyString_Check(name)) { ++ PyErr_SetString(PyExc_ValueError, "Field name must be a string"); ++ return -1; ++ } ++ PyObject* py_cdescriptor = GetDescriptor(self, name); ++ if (py_cdescriptor == NULL) { ++ PyErr_Format(PyExc_ValueError, "Protocol 
message has no \"%s\" field.", ++ PyString_AsString(name)); ++ return -1; ++ } ++ const google::protobuf::FieldDescriptor* descriptor = ++ reinterpret_cast(py_cdescriptor)->descriptor; ++ if (descriptor->label() == google::protobuf::FieldDescriptor::LABEL_REPEATED) { ++ ScopedPyObjectPtr container(GetAttr(self, name)); ++ if (container == NULL) { ++ return -1; ++ } ++ if (descriptor->cpp_type() == google::protobuf::FieldDescriptor::CPPTYPE_MESSAGE) { ++ if (repeated_composite_container::Extend( ++ reinterpret_cast(container.get()), ++ value) ++ == NULL) { ++ return -1; ++ } ++ } else { ++ if (repeated_scalar_container::Extend( ++ reinterpret_cast(container.get()), ++ value) == ++ NULL) { ++ return -1; ++ } ++ } ++ } else if (descriptor->cpp_type() == ++ google::protobuf::FieldDescriptor::CPPTYPE_MESSAGE) { ++ ScopedPyObjectPtr message(GetAttr(self, name)); ++ if (message == NULL) { ++ return -1; ++ } ++ if (MergeFrom(reinterpret_cast(message.get()), ++ value) == NULL) { ++ return -1; ++ } ++ } else { ++ if (SetAttr(self, name, value) < 0) { ++ return -1; ++ } ++ } ++ } ++ return 0; ++} ++ ++static PyObject* New(PyTypeObject* type, PyObject* args, PyObject* kwargs) { ++ CMessage* self = reinterpret_cast(type->tp_alloc(type, 0)); ++ if (self == NULL) { ++ return NULL; ++ } ++ ++ self->message = NULL; ++ self->parent = NULL; ++ self->parent_field = NULL; ++ self->read_only = false; ++ self->extensions = NULL; ++ ++ self->composite_fields = PyDict_New(); ++ if (self->composite_fields == NULL) { ++ return NULL; ++ } ++ return reinterpret_cast(self); ++} ++ ++PyObject* NewEmpty(PyObject* type) { ++ return New(reinterpret_cast(type), NULL, NULL); ++} ++ ++static int Init(CMessage* self, PyObject* args, PyObject* kwargs) { ++ if (kwargs == NULL) { ++ // TODO(anuraag): Set error ++ return -1; ++ } ++ ++ PyObject* descriptor = PyTuple_GetItem(args, 0); ++ if (descriptor == NULL || PyTuple_Size(args) != 1) { ++ PyErr_SetString(PyExc_ValueError, "args must contain one arg: 
descriptor"); ++ return -1; ++ } ++ ++ ScopedPyObjectPtr py_message_type(PyObject_GetAttr(descriptor, kfull_name)); ++ if (py_message_type == NULL) { ++ return -1; ++ } ++ ++ const char* message_type = PyString_AsString(py_message_type.get()); ++ const google::protobuf::Message* message = CreateMessage(message_type); ++ if (message == NULL) { ++ return -1; ++ } ++ ++ self->message = message->New(); ++ self->owner.reset(self->message); ++ ++ if (InitAttributes(self, descriptor, kwargs) < 0) { ++ return -1; ++ } ++ return 0; ++} ++ ++// --------------------------------------------------------------------- ++// Deallocating a CMessage ++// ++// Deallocating a CMessage requires that we clear any weak references ++// from children to the message being deallocated. ++ ++// Clear the weak reference from the child to the parent. ++struct ClearWeakReferences : public ChildVisitor { ++ int VisitRepeatedCompositeContainer(RepeatedCompositeContainer* container) { ++ container->parent = NULL; ++ // The elements in the container have the same parent as the ++ // container itself, so NULL out that pointer as well. ++ const Py_ssize_t n = PyList_GET_SIZE(container->child_messages); ++ for (Py_ssize_t i = 0; i < n; ++i) { ++ CMessage* child_cmessage = reinterpret_cast( ++ PyList_GET_ITEM(container->child_messages, i)); ++ child_cmessage->parent = NULL; ++ } ++ return 0; ++ } ++ ++ int VisitRepeatedScalarContainer(RepeatedScalarContainer* container) { ++ container->parent = NULL; ++ return 0; ++ } ++ ++ int VisitCMessage(CMessage* cmessage, ++ const google::protobuf::FieldDescriptor* field_descriptor) { ++ cmessage->parent = NULL; ++ return 0; ++ } ++}; ++ ++static void Dealloc(CMessage* self) { ++ // Null out all weak references from children to this message. 
++ GOOGLE_CHECK_EQ(0, ForEachCompositeField(self, ClearWeakReferences())); ++ ++ Py_CLEAR(self->extensions); ++ Py_CLEAR(self->composite_fields); ++ self->owner.reset(); ++ Py_TYPE(self)->tp_free(reinterpret_cast(self)); ++} ++ ++// --------------------------------------------------------------------- ++ ++ ++PyObject* IsInitialized(CMessage* self, PyObject* args) { ++ PyObject* errors = NULL; ++ if (PyArg_ParseTuple(args, "|O", &errors) < 0) { ++ return NULL; ++ } ++ if (self->message->IsInitialized()) { ++ Py_RETURN_TRUE; ++ } ++ if (errors != NULL) { ++ ScopedPyObjectPtr initialization_errors( ++ FindInitializationErrors(self)); ++ if (initialization_errors == NULL) { ++ return NULL; ++ } ++ ScopedPyObjectPtr extend_name(PyString_FromString("extend")); ++ if (extend_name == NULL) { ++ return NULL; ++ } ++ ScopedPyObjectPtr result(PyObject_CallMethodObjArgs( ++ errors, ++ extend_name.get(), ++ initialization_errors.get(), ++ NULL)); ++ if (result == NULL) { ++ return NULL; ++ } ++ } ++ Py_RETURN_FALSE; ++} ++ ++PyObject* HasFieldByDescriptor( ++ CMessage* self, const google::protobuf::FieldDescriptor* field_descriptor) { ++ google::protobuf::Message* message = self->message; ++ if (!FIELD_BELONGS_TO_MESSAGE(field_descriptor, message)) { ++ PyErr_SetString(PyExc_KeyError, ++ "Field does not belong to message!"); ++ return NULL; ++ } ++ if (field_descriptor->label() == google::protobuf::FieldDescriptor::LABEL_REPEATED) { ++ PyErr_SetString(PyExc_KeyError, ++ "Field is repeated. A singular method is required."); ++ return NULL; ++ } ++ bool has_field = ++ message->GetReflection()->HasField(*message, field_descriptor); ++ return PyBool_FromLong(has_field ? 
1 : 0); ++} ++ ++const google::protobuf::FieldDescriptor* FindFieldWithOneofs( ++ const google::protobuf::Message* message, const char* field_name, bool* in_oneof) { ++ const google::protobuf::Descriptor* descriptor = message->GetDescriptor(); ++ const google::protobuf::FieldDescriptor* field_descriptor = ++ descriptor->FindFieldByName(field_name); ++ if (field_descriptor == NULL) { ++ const google::protobuf::OneofDescriptor* oneof_desc = ++ message->GetDescriptor()->FindOneofByName(field_name); ++ if (oneof_desc == NULL) { ++ *in_oneof = false; ++ return NULL; ++ } else { ++ *in_oneof = true; ++ return message->GetReflection()->GetOneofFieldDescriptor( ++ *message, oneof_desc); ++ } ++ } ++ return field_descriptor; ++} ++ ++PyObject* HasField(CMessage* self, PyObject* arg) { ++#if PY_MAJOR_VERSION < 3 ++ char* field_name; ++ if (PyString_AsStringAndSize(arg, &field_name, NULL) < 0) { ++#else ++ char* field_name = PyUnicode_AsUTF8(arg); ++ if (!field_name) { ++#endif ++ return NULL; ++ } ++ ++ google::protobuf::Message* message = self->message; ++ const google::protobuf::Descriptor* descriptor = message->GetDescriptor(); ++ bool is_in_oneof; ++ const google::protobuf::FieldDescriptor* field_descriptor = ++ FindFieldWithOneofs(message, field_name, &is_in_oneof); ++ if (field_descriptor == NULL) { ++ if (!is_in_oneof) { ++ PyErr_Format(PyExc_ValueError, "Unknown field %s.", field_name); ++ return NULL; ++ } else { ++ Py_RETURN_FALSE; ++ } ++ } ++ ++ if (field_descriptor->label() == google::protobuf::FieldDescriptor::LABEL_REPEATED) { ++ PyErr_Format(PyExc_ValueError, ++ "Protocol message has no singular \"%s\" field.", field_name); ++ return NULL; ++ } ++ ++ bool has_field = ++ message->GetReflection()->HasField(*message, field_descriptor); ++ if (!has_field && field_descriptor->cpp_type() == ++ google::protobuf::FieldDescriptor::CPPTYPE_ENUM) { ++ // We may have an invalid enum value stored in the UnknownFieldSet and need ++ // to check presence in there as well. 
++ const google::protobuf::UnknownFieldSet& unknown_field_set = ++ message->GetReflection()->GetUnknownFields(*message); ++ for (int i = 0; i < unknown_field_set.field_count(); ++i) { ++ if (unknown_field_set.field(i).number() == field_descriptor->number()) { ++ Py_RETURN_TRUE; ++ } ++ } ++ Py_RETURN_FALSE; ++ } ++ return PyBool_FromLong(has_field ? 1 : 0); ++} ++ ++PyObject* ClearExtension(CMessage* self, PyObject* arg) { ++ if (self->extensions != NULL) { ++ return extension_dict::ClearExtension(self->extensions, arg); ++ } ++ PyErr_SetString(PyExc_TypeError, "Message is not extendable"); ++ return NULL; ++} ++ ++PyObject* HasExtension(CMessage* self, PyObject* arg) { ++ if (self->extensions != NULL) { ++ return extension_dict::HasExtension(self->extensions, arg); ++ } ++ PyErr_SetString(PyExc_TypeError, "Message is not extendable"); ++ return NULL; ++} ++ ++// --------------------------------------------------------------------- ++// Releasing messages ++// ++// The Python API's ClearField() and Clear() methods behave ++// differently than their C++ counterparts. While the C++ versions ++// clears the children the Python versions detaches the children, ++// without touching their content. This impedance mismatch causes ++// some complexity in the implementation, which is captured in this ++// section. ++// ++// When a CMessage field is cleared we need to: ++// ++// * Release the Message used as the backing store for the CMessage ++// from its parent. ++// ++// * Change the owner field of the released CMessage and all of its ++// children to point to the newly released Message. ++// ++// * Clear the weak references from the released CMessage to the ++// parent. ++// ++// When a RepeatedCompositeContainer field is cleared we need to: ++// ++// * Release all the Message used as the backing store for the ++// CMessages stored in the container. 
++// ++// * Change the owner field of all the released CMessage and all of ++// their children to point to the newly released Messages. ++// ++// * Clear the weak references from the released container to the ++// parent. ++ ++struct SetOwnerVisitor : public ChildVisitor { ++ // new_owner must outlive this object. ++ explicit SetOwnerVisitor(const shared_ptr& new_owner) ++ : new_owner_(new_owner) {} ++ ++ int VisitRepeatedCompositeContainer(RepeatedCompositeContainer* container) { ++ repeated_composite_container::SetOwner(container, new_owner_); ++ return 0; ++ } ++ ++ int VisitRepeatedScalarContainer(RepeatedScalarContainer* container) { ++ repeated_scalar_container::SetOwner(container, new_owner_); ++ return 0; ++ } ++ ++ int VisitCMessage(CMessage* cmessage, ++ const google::protobuf::FieldDescriptor* field_descriptor) { ++ return SetOwner(cmessage, new_owner_); ++ } ++ ++ private: ++ const shared_ptr& new_owner_; ++}; ++ ++// Change the owner of this CMessage and all its children, recursively. ++int SetOwner(CMessage* self, const shared_ptr& new_owner) { ++ self->owner = new_owner; ++ if (ForEachCompositeField(self, SetOwnerVisitor(new_owner)) == -1) ++ return -1; ++ return 0; ++} ++ ++// Releases the message specified by 'field' and returns the ++// pointer. If the field does not exist a new message is created using ++// 'descriptor'. The caller takes ownership of the returned pointer. ++Message* ReleaseMessage(google::protobuf::Message* message, ++ const google::protobuf::Descriptor* descriptor, ++ const google::protobuf::FieldDescriptor* field_descriptor) { ++ Message* released_message = message->GetReflection()->ReleaseMessage( ++ message, field_descriptor, global_message_factory); ++ // ReleaseMessage will return NULL which differs from ++ // child_cmessage->message, if the field does not exist. 
In this case, ++ // the latter points to the default instance via a const_cast<>, so we ++ // have to reset it to a new mutable object since we are taking ownership. ++ if (released_message == NULL) { ++ const Message* prototype = global_message_factory->GetPrototype( ++ descriptor); ++ GOOGLE_DCHECK(prototype != NULL); ++ released_message = prototype->New(); ++ } ++ ++ return released_message; ++} ++ ++int ReleaseSubMessage(google::protobuf::Message* message, ++ const google::protobuf::FieldDescriptor* field_descriptor, ++ CMessage* child_cmessage) { ++ // Release the Message ++ shared_ptr released_message(ReleaseMessage( ++ message, child_cmessage->message->GetDescriptor(), field_descriptor)); ++ child_cmessage->message = released_message.get(); ++ child_cmessage->owner.swap(released_message); ++ child_cmessage->parent = NULL; ++ child_cmessage->parent_field = NULL; ++ child_cmessage->read_only = false; ++ return ForEachCompositeField(child_cmessage, ++ SetOwnerVisitor(child_cmessage->owner)); ++} ++ ++struct ReleaseChild : public ChildVisitor { ++ // message must outlive this object. 
++ explicit ReleaseChild(google::protobuf::Message* parent_message) : ++ parent_message_(parent_message) {} ++ ++ int VisitRepeatedCompositeContainer(RepeatedCompositeContainer* container) { ++ return repeated_composite_container::Release( ++ reinterpret_cast(container)); ++ } ++ ++ int VisitRepeatedScalarContainer(RepeatedScalarContainer* container) { ++ return repeated_scalar_container::Release( ++ reinterpret_cast(container)); ++ } ++ ++ int VisitCMessage(CMessage* cmessage, ++ const google::protobuf::FieldDescriptor* field_descriptor) { ++ return ReleaseSubMessage(parent_message_, field_descriptor, ++ reinterpret_cast(cmessage)); ++ } ++ ++ google::protobuf::Message* parent_message_; ++}; ++ ++int InternalReleaseFieldByDescriptor( ++ const google::protobuf::FieldDescriptor* field_descriptor, ++ PyObject* composite_field, ++ google::protobuf::Message* parent_message) { ++ return VisitCompositeField( ++ field_descriptor, ++ composite_field, ++ ReleaseChild(parent_message)); ++} ++ ++int InternalReleaseField(CMessage* self, PyObject* composite_field, ++ PyObject* name) { ++ PyObject* cdescriptor = GetDescriptor(self, name); ++ if (cdescriptor != NULL) { ++ const google::protobuf::FieldDescriptor* descriptor = ++ reinterpret_cast(cdescriptor)->descriptor; ++ return InternalReleaseFieldByDescriptor( ++ descriptor, composite_field, self->message); ++ } ++ ++ return 0; ++} ++ ++PyObject* ClearFieldByDescriptor( ++ CMessage* self, ++ const google::protobuf::FieldDescriptor* descriptor) { ++ if (!FIELD_BELONGS_TO_MESSAGE(descriptor, self->message)) { ++ PyErr_SetString(PyExc_KeyError, ++ "Field does not belong to message!"); ++ return NULL; ++ } ++ AssureWritable(self); ++ self->message->GetReflection()->ClearField(self->message, descriptor); ++ Py_RETURN_NONE; ++} ++ ++PyObject* ClearField(CMessage* self, PyObject* arg) { ++ char* field_name; ++ if (!PyString_Check(arg)) { ++ PyErr_SetString(PyExc_TypeError, "field name must be a string"); ++ return NULL; ++ } ++#if 
PY_MAJOR_VERSION < 3 ++ if (PyString_AsStringAndSize(arg, &field_name, NULL) < 0) { ++ return NULL; ++ } ++#else ++ field_name = PyUnicode_AsUTF8(arg); ++#endif ++ AssureWritable(self); ++ google::protobuf::Message* message = self->message; ++ const google::protobuf::Descriptor* descriptor = message->GetDescriptor(); ++ ScopedPyObjectPtr arg_in_oneof; ++ bool is_in_oneof; ++ const google::protobuf::FieldDescriptor* field_descriptor = ++ FindFieldWithOneofs(message, field_name, &is_in_oneof); ++ if (field_descriptor == NULL) { ++ if (!is_in_oneof) { ++ PyErr_Format(PyExc_ValueError, ++ "Protocol message has no \"%s\" field.", field_name); ++ return NULL; ++ } else { ++ Py_RETURN_NONE; ++ } ++ } else if (is_in_oneof) { ++ arg_in_oneof.reset(PyString_FromString(field_descriptor->name().c_str())); ++ arg = arg_in_oneof.get(); ++ } ++ ++ PyObject* composite_field = PyDict_GetItem(self->composite_fields, ++ arg); ++ ++ // Only release the field if there's a possibility that there are ++ // references to it. ++ if (composite_field != NULL) { ++ if (InternalReleaseField(self, composite_field, arg) < 0) { ++ return NULL; ++ } ++ PyDict_DelItem(self->composite_fields, arg); ++ } ++ message->GetReflection()->ClearField(message, field_descriptor); ++ if (field_descriptor->cpp_type() == google::protobuf::FieldDescriptor::CPPTYPE_ENUM) { ++ google::protobuf::UnknownFieldSet* unknown_field_set = ++ message->GetReflection()->MutableUnknownFields(message); ++ unknown_field_set->DeleteByNumber(field_descriptor->number()); ++ } ++ ++ Py_RETURN_NONE; ++} ++ ++PyObject* Clear(CMessage* self) { ++ AssureWritable(self); ++ if (ForEachCompositeField(self, ReleaseChild(self->message)) == -1) ++ return NULL; ++ ++ // The old ExtensionDict still aliases this CMessage, but all its ++ // fields have been released. 
++ if (self->extensions != NULL) { ++ Py_CLEAR(self->extensions); ++ PyObject* py_extension_dict = PyObject_CallObject( ++ reinterpret_cast(&ExtensionDict_Type), NULL); ++ if (py_extension_dict == NULL) { ++ return NULL; ++ } ++ ExtensionDict* extension_dict = reinterpret_cast( ++ py_extension_dict); ++ extension_dict->parent = self; ++ extension_dict->message = self->message; ++ self->extensions = extension_dict; ++ } ++ PyDict_Clear(self->composite_fields); ++ self->message->Clear(); ++ Py_RETURN_NONE; ++} ++ ++// --------------------------------------------------------------------- ++ ++static string GetMessageName(CMessage* self) { ++ if (self->parent_field != NULL) { ++ return self->parent_field->descriptor->full_name(); ++ } else { ++ return self->message->GetDescriptor()->full_name(); ++ } ++} ++ ++static PyObject* SerializeToString(CMessage* self, PyObject* args) { ++ if (!self->message->IsInitialized()) { ++ ScopedPyObjectPtr errors(FindInitializationErrors(self)); ++ if (errors == NULL) { ++ return NULL; ++ } ++ ScopedPyObjectPtr comma(PyString_FromString(",")); ++ if (comma == NULL) { ++ return NULL; ++ } ++ ScopedPyObjectPtr joined( ++ PyObject_CallMethod(comma.get(), "join", "O", errors.get())); ++ if (joined == NULL) { ++ return NULL; ++ } ++ PyErr_Format(EncodeError_class, "Message %s is missing required fields: %s", ++ GetMessageName(self).c_str(), PyString_AsString(joined.get())); ++ return NULL; ++ } ++ int size = self->message->ByteSize(); ++ if (size <= 0) { ++ return PyBytes_FromString(""); ++ } ++ PyObject* result = PyBytes_FromStringAndSize(NULL, size); ++ if (result == NULL) { ++ return NULL; ++ } ++ char* buffer = PyBytes_AS_STRING(result); ++ self->message->SerializeWithCachedSizesToArray( ++ reinterpret_cast(buffer)); ++ return result; ++} ++ ++static PyObject* SerializePartialToString(CMessage* self) { ++ string contents; ++ self->message->SerializePartialToString(&contents); ++ return PyBytes_FromStringAndSize(contents.c_str(), 
contents.size()); ++} ++ ++// Formats proto fields for ascii dumps using python formatting functions where ++// appropriate. ++class PythonFieldValuePrinter : public google::protobuf::TextFormat::FieldValuePrinter { ++ public: ++ PythonFieldValuePrinter() : float_holder_(PyFloat_FromDouble(0)) {} ++ ++ // Python has some differences from C++ when printing floating point numbers. ++ // ++ // 1) Trailing .0 is always printed. ++ // 2) Outputted is rounded to 12 digits. ++ // ++ // We override floating point printing with the C-API function for printing ++ // Python floats to ensure consistency. ++ string PrintFloat(float value) const { return PrintDouble(value); } ++ string PrintDouble(double value) const { ++ reinterpret_cast(float_holder_.get())->ob_fval = value; ++ ScopedPyObjectPtr s(PyObject_Str(float_holder_.get())); ++ if (s == NULL) return string(); ++#if PY_MAJOR_VERSION < 3 ++ char *cstr = PyBytes_AS_STRING(static_cast(s)); ++#else ++ char *cstr = PyUnicode_AsUTF8(s); ++#endif ++ return string(cstr); ++ } ++ ++ private: ++ // Holder for a python float object which we use to allow us to use ++ // the Python API for printing doubles. We initialize once and then ++ // directly modify it for every float printed to save on allocations ++ // and refcounting. 
++ ScopedPyObjectPtr float_holder_; ++}; ++ ++static PyObject* ToStr(CMessage* self) { ++ google::protobuf::TextFormat::Printer printer; ++ // Passes ownership ++ printer.SetDefaultFieldValuePrinter(new PythonFieldValuePrinter()); ++ printer.SetHideUnknownFields(true); ++ string output; ++ if (!printer.PrintToString(*self->message, &output)) { ++ PyErr_SetString(PyExc_ValueError, "Unable to convert message to str"); ++ return NULL; ++ } ++ return PyString_FromString(output.c_str()); ++} ++ ++PyObject* MergeFrom(CMessage* self, PyObject* arg) { ++ CMessage* other_message; ++ if (!PyObject_TypeCheck(reinterpret_cast(arg), &CMessage_Type)) { ++ PyErr_SetString(PyExc_TypeError, "Must be a message"); ++ return NULL; ++ } ++ ++ other_message = reinterpret_cast(arg); ++ if (other_message->message->GetDescriptor() != ++ self->message->GetDescriptor()) { ++ PyErr_Format(PyExc_TypeError, ++ "Tried to merge from a message with a different type. " ++ "to: %s, from: %s", ++ self->message->GetDescriptor()->full_name().c_str(), ++ other_message->message->GetDescriptor()->full_name().c_str()); ++ return NULL; ++ } ++ AssureWritable(self); ++ ++ // TODO(tibell): Message::MergeFrom might turn some child Messages ++ // into mutable messages, invalidating the message field in the ++ // corresponding CMessages. We should run a FixupMessageReferences ++ // pass here. ++ ++ self->message->MergeFrom(*other_message->message); ++ Py_RETURN_NONE; ++} ++ ++static PyObject* CopyFrom(CMessage* self, PyObject* arg) { ++ CMessage* other_message; ++ if (!PyObject_TypeCheck(reinterpret_cast(arg), &CMessage_Type)) { ++ PyErr_SetString(PyExc_TypeError, "Must be a message"); ++ return NULL; ++ } ++ ++ other_message = reinterpret_cast(arg); ++ ++ if (self == other_message) { ++ Py_RETURN_NONE; ++ } ++ ++ if (other_message->message->GetDescriptor() != ++ self->message->GetDescriptor()) { ++ PyErr_Format(PyExc_TypeError, ++ "Tried to copy from a message with a different type. 
" ++ "to: %s, from: %s", ++ self->message->GetDescriptor()->full_name().c_str(), ++ other_message->message->GetDescriptor()->full_name().c_str()); ++ return NULL; ++ } ++ ++ AssureWritable(self); ++ ++ // CopyFrom on the message will not clean up self->composite_fields, ++ // which can leave us in an inconsistent state, so clear it out here. ++ Clear(self); ++ ++ self->message->CopyFrom(*other_message->message); ++ ++ Py_RETURN_NONE; ++} ++ ++static PyObject* MergeFromString(CMessage* self, PyObject* arg) { ++ const void* data; ++ Py_ssize_t data_length; ++ if (PyObject_AsReadBuffer(arg, &data, &data_length) < 0) { ++ return NULL; ++ } ++ ++ AssureWritable(self); ++ google::protobuf::io::CodedInputStream input( ++ reinterpret_cast(data), data_length); ++ input.SetExtensionRegistry(GetDescriptorPool(), global_message_factory); ++ bool success = self->message->MergePartialFromCodedStream(&input); ++ if (success) { ++ return PyInt_FromLong(input.CurrentPosition()); ++ } else { ++ PyErr_Format(DecodeError_class, "Error parsing message"); ++ return NULL; ++ } ++} ++ ++static PyObject* ParseFromString(CMessage* self, PyObject* arg) { ++ if (Clear(self) == NULL) { ++ return NULL; ++ } ++ return MergeFromString(self, arg); ++} ++ ++static PyObject* ByteSize(CMessage* self, PyObject* args) { ++ return PyLong_FromLong(self->message->ByteSize()); ++} ++ ++static PyObject* RegisterExtension(PyObject* cls, ++ PyObject* extension_handle) { ++ ScopedPyObjectPtr message_descriptor(PyObject_GetAttr(cls, kDESCRIPTOR)); ++ if (message_descriptor == NULL) { ++ return NULL; ++ } ++ if (PyObject_SetAttrString(extension_handle, "containing_type", ++ message_descriptor) < 0) { ++ return NULL; ++ } ++ ScopedPyObjectPtr extensions_by_name( ++ PyObject_GetAttr(cls, k_extensions_by_name)); ++ if (extensions_by_name == NULL) { ++ PyErr_SetString(PyExc_TypeError, "no extensions_by_name on class"); ++ return NULL; ++ } ++ ScopedPyObjectPtr full_name(PyObject_GetAttr(extension_handle, 
kfull_name)); ++ if (full_name == NULL) { ++ return NULL; ++ } ++ if (PyDict_SetItem(extensions_by_name, full_name, extension_handle) < 0) { ++ return NULL; ++ } ++ ++ // Also store a mapping from extension number to implementing class. ++ ScopedPyObjectPtr extensions_by_number( ++ PyObject_GetAttr(cls, k_extensions_by_number)); ++ if (extensions_by_number == NULL) { ++ PyErr_SetString(PyExc_TypeError, "no extensions_by_number on class"); ++ return NULL; ++ } ++ ScopedPyObjectPtr number(PyObject_GetAttrString(extension_handle, "number")); ++ if (number == NULL) { ++ return NULL; ++ } ++ if (PyDict_SetItem(extensions_by_number, number, extension_handle) < 0) { ++ return NULL; ++ } ++ ++ CFieldDescriptor* cdescriptor = ++ extension_dict::InternalGetCDescriptorFromExtension(extension_handle); ++ ScopedPyObjectPtr py_cdescriptor(reinterpret_cast(cdescriptor)); ++ if (cdescriptor == NULL) { ++ return NULL; ++ } ++ Py_INCREF(extension_handle); ++ cdescriptor->descriptor_field = extension_handle; ++ const google::protobuf::FieldDescriptor* descriptor = cdescriptor->descriptor; ++ // Check if it's a message set ++ if (descriptor->is_extension() && ++ descriptor->containing_type()->options().message_set_wire_format() && ++ descriptor->type() == google::protobuf::FieldDescriptor::TYPE_MESSAGE && ++ descriptor->message_type() == descriptor->extension_scope() && ++ descriptor->label() == google::protobuf::FieldDescriptor::LABEL_OPTIONAL) { ++ ScopedPyObjectPtr message_name(PyString_FromStringAndSize( ++ descriptor->message_type()->full_name().c_str(), ++ descriptor->message_type()->full_name().size())); ++ if (message_name == NULL) { ++ return NULL; ++ } ++ PyDict_SetItem(extensions_by_name, message_name, extension_handle); ++ } ++ ++ Py_RETURN_NONE; ++} ++ ++static PyObject* SetInParent(CMessage* self, PyObject* args) { ++ AssureWritable(self); ++ Py_RETURN_NONE; ++} ++ ++static PyObject* WhichOneof(CMessage* self, PyObject* arg) { ++ char* oneof_name; ++ if 
(!PyString_Check(arg)) { ++ PyErr_SetString(PyExc_TypeError, "field name must be a string"); ++ return NULL; ++ } ++ oneof_name = PyString_AsString(arg); ++ if (oneof_name == NULL) { ++ return NULL; ++ } ++ const google::protobuf::OneofDescriptor* oneof_desc = ++ self->message->GetDescriptor()->FindOneofByName(oneof_name); ++ if (oneof_desc == NULL) { ++ PyErr_Format(PyExc_ValueError, ++ "Protocol message has no oneof \"%s\" field.", oneof_name); ++ return NULL; ++ } ++ const google::protobuf::FieldDescriptor* field_in_oneof = ++ self->message->GetReflection()->GetOneofFieldDescriptor( ++ *self->message, oneof_desc); ++ if (field_in_oneof == NULL) { ++ Py_RETURN_NONE; ++ } else { ++ return PyString_FromString(field_in_oneof->name().c_str()); ++ } ++} ++ ++static PyObject* ListFields(CMessage* self) { ++ vector fields; ++ self->message->GetReflection()->ListFields(*self->message, &fields); ++ ++ PyObject* descriptor = PyDict_GetItem(Py_TYPE(self)->tp_dict, kDESCRIPTOR); ++ if (descriptor == NULL) { ++ return NULL; ++ } ++ ScopedPyObjectPtr fields_by_name( ++ PyObject_GetAttr(descriptor, kfields_by_name)); ++ if (fields_by_name == NULL) { ++ return NULL; ++ } ++ ScopedPyObjectPtr extensions_by_name(PyObject_GetAttr( ++ reinterpret_cast(Py_TYPE(self)), k_extensions_by_name)); ++ if (extensions_by_name == NULL) { ++ PyErr_SetString(PyExc_ValueError, "no extensionsbyname"); ++ return NULL; ++ } ++ // Normally, the list will be exactly the size of the fields. ++ PyObject* all_fields = PyList_New(fields.size()); ++ if (all_fields == NULL) { ++ return NULL; ++ } ++ ++ // When there are unknown extensions, the py list will *not* contain ++ // the field information. Thus the actual size of the py list will be ++ // smaller than the size of fields. Set the actual size at the end. 
++ Py_ssize_t actual_size = 0; ++ for (Py_ssize_t i = 0; i < fields.size(); ++i) { ++ ScopedPyObjectPtr t(PyTuple_New(2)); ++ if (t == NULL) { ++ Py_DECREF(all_fields); ++ return NULL; ++ } ++ ++ if (fields[i]->is_extension()) { ++ const string& field_name = fields[i]->full_name(); ++ PyObject* extension_field = PyDict_GetItemString(extensions_by_name, ++ field_name.c_str()); ++ if (extension_field == NULL) { ++ // If we couldn't fetch extension_field, it means the module that ++ // defines this extension has not been explicitly imported in Python ++ // code, and the extension hasn't been registered. There's nothing much ++ // we can do about this, so just skip it in the output to match the ++ // behavior of the python implementation. ++ continue; ++ } ++ PyObject* extensions = reinterpret_cast(self->extensions); ++ if (extensions == NULL) { ++ Py_DECREF(all_fields); ++ return NULL; ++ } ++ // 'extension' reference later stolen by PyTuple_SET_ITEM. ++ PyObject* extension = PyObject_GetItem(extensions, extension_field); ++ if (extension == NULL) { ++ Py_DECREF(all_fields); ++ return NULL; ++ } ++ Py_INCREF(extension_field); ++ PyTuple_SET_ITEM(t.get(), 0, extension_field); ++ // Steals reference to 'extension' ++ PyTuple_SET_ITEM(t.get(), 1, extension); ++ } else { ++ const string& field_name = fields[i]->name(); ++ ScopedPyObjectPtr py_field_name(PyString_FromStringAndSize( ++ field_name.c_str(), field_name.length())); ++ if (py_field_name == NULL) { ++ PyErr_SetString(PyExc_ValueError, "bad string"); ++ Py_DECREF(all_fields); ++ return NULL; ++ } ++ PyObject* field_descriptor = ++ PyDict_GetItem(fields_by_name, py_field_name); ++ if (field_descriptor == NULL) { ++ Py_DECREF(all_fields); ++ return NULL; ++ } ++ ++ PyObject* field_value = GetAttr(self, py_field_name); ++ if (field_value == NULL) { ++ PyErr_SetObject(PyExc_ValueError, py_field_name); ++ Py_DECREF(all_fields); ++ return NULL; ++ } ++ Py_INCREF(field_descriptor); ++ PyTuple_SET_ITEM(t.get(), 0, 
field_descriptor); ++ PyTuple_SET_ITEM(t.get(), 1, field_value); ++ } ++ PyList_SET_ITEM(all_fields, actual_size, t.release()); ++ ++actual_size; ++ } ++ Py_SIZE(all_fields) = actual_size; ++ return all_fields; ++} ++ ++PyObject* FindInitializationErrors(CMessage* self) { ++ google::protobuf::Message* message = self->message; ++ vector errors; ++ message->FindInitializationErrors(&errors); ++ ++ PyObject* error_list = PyList_New(errors.size()); ++ if (error_list == NULL) { ++ return NULL; ++ } ++ for (Py_ssize_t i = 0; i < errors.size(); ++i) { ++ const string& error = errors[i]; ++ PyObject* error_string = PyString_FromStringAndSize( ++ error.c_str(), error.length()); ++ if (error_string == NULL) { ++ Py_DECREF(error_list); ++ return NULL; ++ } ++ PyList_SET_ITEM(error_list, i, error_string); ++ } ++ return error_list; ++} ++ ++static PyObject* RichCompare(CMessage* self, PyObject* other, int opid) { ++ if (!PyObject_TypeCheck(other, &CMessage_Type)) { ++ if (opid == Py_EQ) { ++ Py_RETURN_FALSE; ++ } else if (opid == Py_NE) { ++ Py_RETURN_TRUE; ++ } ++ } ++ if (opid == Py_EQ || opid == Py_NE) { ++ ScopedPyObjectPtr self_fields(ListFields(self)); ++ ScopedPyObjectPtr other_fields(ListFields( ++ reinterpret_cast(other))); ++ return PyObject_RichCompare(self_fields, other_fields, opid); ++ } else { ++ Py_INCREF(Py_NotImplemented); ++ return Py_NotImplemented; ++ } ++} ++ ++PyObject* InternalGetScalar( ++ CMessage* self, ++ const google::protobuf::FieldDescriptor* field_descriptor) { ++ google::protobuf::Message* message = self->message; ++ const google::protobuf::Reflection* reflection = message->GetReflection(); ++ ++ if (!FIELD_BELONGS_TO_MESSAGE(field_descriptor, message)) { ++ PyErr_SetString( ++ PyExc_KeyError, "Field does not belong to message!"); ++ return NULL; ++ } ++ ++ PyObject* result = NULL; ++ switch (field_descriptor->cpp_type()) { ++ case google::protobuf::FieldDescriptor::CPPTYPE_INT32: { ++ int32 value = reflection->GetInt32(*message, 
field_descriptor); ++ result = PyInt_FromLong(value); ++ break; ++ } ++ case google::protobuf::FieldDescriptor::CPPTYPE_INT64: { ++ int64 value = reflection->GetInt64(*message, field_descriptor); ++ result = PyLong_FromLongLong(value); ++ break; ++ } ++ case google::protobuf::FieldDescriptor::CPPTYPE_UINT32: { ++ uint32 value = reflection->GetUInt32(*message, field_descriptor); ++ result = PyInt_FromSize_t(value); ++ break; ++ } ++ case google::protobuf::FieldDescriptor::CPPTYPE_UINT64: { ++ uint64 value = reflection->GetUInt64(*message, field_descriptor); ++ result = PyLong_FromUnsignedLongLong(value); ++ break; ++ } ++ case google::protobuf::FieldDescriptor::CPPTYPE_FLOAT: { ++ float value = reflection->GetFloat(*message, field_descriptor); ++ result = PyFloat_FromDouble(value); ++ break; ++ } ++ case google::protobuf::FieldDescriptor::CPPTYPE_DOUBLE: { ++ double value = reflection->GetDouble(*message, field_descriptor); ++ result = PyFloat_FromDouble(value); ++ break; ++ } ++ case google::protobuf::FieldDescriptor::CPPTYPE_BOOL: { ++ bool value = reflection->GetBool(*message, field_descriptor); ++ result = PyBool_FromLong(value); ++ break; ++ } ++ case google::protobuf::FieldDescriptor::CPPTYPE_STRING: { ++ string value = reflection->GetString(*message, field_descriptor); ++ result = ToStringObject(field_descriptor, value); ++ break; ++ } ++ case google::protobuf::FieldDescriptor::CPPTYPE_ENUM: { ++ if (!message->GetReflection()->HasField(*message, field_descriptor)) { ++ // Look for the value in the unknown fields. 
++ google::protobuf::UnknownFieldSet* unknown_field_set = ++ message->GetReflection()->MutableUnknownFields(message); ++ for (int i = 0; i < unknown_field_set->field_count(); ++i) { ++ if (unknown_field_set->field(i).number() == ++ field_descriptor->number()) { ++ result = PyInt_FromLong(unknown_field_set->field(i).varint()); ++ break; ++ } ++ } ++ } ++ ++ if (result == NULL) { ++ const google::protobuf::EnumValueDescriptor* enum_value = ++ message->GetReflection()->GetEnum(*message, field_descriptor); ++ result = PyInt_FromLong(enum_value->number()); ++ } ++ break; ++ } ++ default: ++ PyErr_Format( ++ PyExc_SystemError, "Getting a value from a field of unknown type %d", ++ field_descriptor->cpp_type()); ++ } ++ ++ return result; ++} ++ ++PyObject* InternalGetSubMessage(CMessage* self, ++ CFieldDescriptor* cfield_descriptor) { ++ PyObject* field = cfield_descriptor->descriptor_field; ++ ScopedPyObjectPtr message_type(PyObject_GetAttr(field, kmessage_type)); ++ if (message_type == NULL) { ++ return NULL; ++ } ++ ScopedPyObjectPtr concrete_class( ++ PyObject_GetAttr(message_type, k_concrete_class)); ++ if (concrete_class == NULL) { ++ return NULL; ++ } ++ PyObject* py_cmsg = cmessage::NewEmpty(concrete_class); ++ if (py_cmsg == NULL) { ++ return NULL; ++ } ++ if (!PyObject_TypeCheck(py_cmsg, &CMessage_Type)) { ++ PyErr_SetString(PyExc_TypeError, "Not a CMessage!"); ++ } ++ CMessage* cmsg = reinterpret_cast(py_cmsg); ++ ++ const google::protobuf::FieldDescriptor* field_descriptor = ++ cfield_descriptor->descriptor; ++ const google::protobuf::Reflection* reflection = self->message->GetReflection(); ++ const google::protobuf::Message& sub_message = reflection->GetMessage( ++ *self->message, field_descriptor, global_message_factory); ++ cmsg->owner = self->owner; ++ cmsg->parent = self; ++ cmsg->parent_field = cfield_descriptor; ++ cmsg->read_only = !reflection->HasField(*self->message, field_descriptor); ++ cmsg->message = const_cast(&sub_message); ++ ++ if 
(InitAttributes(cmsg, NULL, NULL) < 0) { ++ Py_DECREF(py_cmsg); ++ return NULL; ++ } ++ return py_cmsg; ++} ++ ++int InternalSetScalar( ++ CMessage* self, ++ const google::protobuf::FieldDescriptor* field_descriptor, ++ PyObject* arg) { ++ google::protobuf::Message* message = self->message; ++ const google::protobuf::Reflection* reflection = message->GetReflection(); ++ ++ if (!FIELD_BELONGS_TO_MESSAGE(field_descriptor, message)) { ++ PyErr_SetString( ++ PyExc_KeyError, "Field does not belong to message!"); ++ return -1; ++ } ++ ++ if (MaybeReleaseOverlappingOneofField(self, field_descriptor) < 0) { ++ return -1; ++ } ++ ++ switch (field_descriptor->cpp_type()) { ++ case google::protobuf::FieldDescriptor::CPPTYPE_INT32: { ++ GOOGLE_CHECK_GET_INT32(arg, value, -1); ++ reflection->SetInt32(message, field_descriptor, value); ++ break; ++ } ++ case google::protobuf::FieldDescriptor::CPPTYPE_INT64: { ++ GOOGLE_CHECK_GET_INT64(arg, value, -1); ++ reflection->SetInt64(message, field_descriptor, value); ++ break; ++ } ++ case google::protobuf::FieldDescriptor::CPPTYPE_UINT32: { ++ GOOGLE_CHECK_GET_UINT32(arg, value, -1); ++ reflection->SetUInt32(message, field_descriptor, value); ++ break; ++ } ++ case google::protobuf::FieldDescriptor::CPPTYPE_UINT64: { ++ GOOGLE_CHECK_GET_UINT64(arg, value, -1); ++ reflection->SetUInt64(message, field_descriptor, value); ++ break; ++ } ++ case google::protobuf::FieldDescriptor::CPPTYPE_FLOAT: { ++ GOOGLE_CHECK_GET_FLOAT(arg, value, -1); ++ reflection->SetFloat(message, field_descriptor, value); ++ break; ++ } ++ case google::protobuf::FieldDescriptor::CPPTYPE_DOUBLE: { ++ GOOGLE_CHECK_GET_DOUBLE(arg, value, -1); ++ reflection->SetDouble(message, field_descriptor, value); ++ break; ++ } ++ case google::protobuf::FieldDescriptor::CPPTYPE_BOOL: { ++ GOOGLE_CHECK_GET_BOOL(arg, value, -1); ++ reflection->SetBool(message, field_descriptor, value); ++ break; ++ } ++ case google::protobuf::FieldDescriptor::CPPTYPE_STRING: { ++ if 
(!CheckAndSetString( ++ arg, message, field_descriptor, reflection, false, -1)) { ++ return -1; ++ } ++ break; ++ } ++ case google::protobuf::FieldDescriptor::CPPTYPE_ENUM: { ++ GOOGLE_CHECK_GET_INT32(arg, value, -1); ++ const google::protobuf::EnumDescriptor* enum_descriptor = ++ field_descriptor->enum_type(); ++ const google::protobuf::EnumValueDescriptor* enum_value = ++ enum_descriptor->FindValueByNumber(value); ++ if (enum_value != NULL) { ++ reflection->SetEnum(message, field_descriptor, enum_value); ++ } else { ++ PyErr_Format(PyExc_ValueError, "Unknown enum value: %d", value); ++ return -1; ++ } ++ break; ++ } ++ default: ++ PyErr_Format( ++ PyExc_SystemError, "Setting value to a field of unknown type %d", ++ field_descriptor->cpp_type()); ++ return -1; ++ } ++ ++ return 0; ++} ++ ++PyObject* FromString(PyTypeObject* cls, PyObject* serialized) { ++ PyObject* py_cmsg = PyObject_CallObject( ++ reinterpret_cast(cls), NULL); ++ if (py_cmsg == NULL) { ++ return NULL; ++ } ++ CMessage* cmsg = reinterpret_cast(py_cmsg); ++ ++ ScopedPyObjectPtr py_length(MergeFromString(cmsg, serialized)); ++ if (py_length == NULL) { ++ Py_DECREF(py_cmsg); ++ return NULL; ++ } ++ ++ if (InitAttributes(cmsg, NULL, NULL) < 0) { ++ Py_DECREF(py_cmsg); ++ return NULL; ++ } ++ return py_cmsg; ++} ++ ++static PyObject* AddDescriptors(PyTypeObject* cls, ++ PyObject* descriptor) { ++ if (PyObject_SetAttr(reinterpret_cast(cls), ++ k_extensions_by_name, PyDict_New()) < 0) { ++ return NULL; ++ } ++ if (PyObject_SetAttr(reinterpret_cast(cls), ++ k_extensions_by_number, PyDict_New()) < 0) { ++ return NULL; ++ } ++ ++ ScopedPyObjectPtr field_descriptors(PyDict_New()); ++ ++ ScopedPyObjectPtr fields(PyObject_GetAttrString(descriptor, "fields")); ++ if (fields == NULL) { ++ return NULL; ++ } ++ ++ ScopedPyObjectPtr _NUMBER_string(PyString_FromString("_FIELD_NUMBER")); ++ if (_NUMBER_string == NULL) { ++ return NULL; ++ } ++ ++ const Py_ssize_t fields_size = PyList_GET_SIZE(fields.get()); ++ for 
(int i = 0; i < fields_size; ++i) { ++ PyObject* field = PyList_GET_ITEM(fields.get(), i); ++ ScopedPyObjectPtr field_name(PyObject_GetAttr(field, kname)); ++ ScopedPyObjectPtr full_field_name(PyObject_GetAttr(field, kfull_name)); ++ if (field_name == NULL || full_field_name == NULL) { ++ PyErr_SetString(PyExc_TypeError, "Name is null"); ++ return NULL; ++ } ++ ++ PyObject* field_descriptor = ++ cdescriptor_pool::FindFieldByName(descriptor_pool, full_field_name); ++ if (field_descriptor == NULL) { ++ PyErr_SetString(PyExc_TypeError, "Couldn't find field"); ++ return NULL; ++ } ++ Py_INCREF(field); ++ CFieldDescriptor* cfield_descriptor = reinterpret_cast( ++ field_descriptor); ++ cfield_descriptor->descriptor_field = field; ++ if (PyDict_SetItem(field_descriptors, field_name, field_descriptor) < 0) { ++ return NULL; ++ } ++ ++ // The FieldDescriptor's name field might either be of type bytes or ++ // of type unicode, depending on whether the FieldDescriptor was ++ // parsed from a serialized message or read from the ++ // _pb2.py module. 
++ ScopedPyObjectPtr field_name_upcased( ++ PyObject_CallMethod(field_name, "upper", NULL)); ++ if (field_name_upcased == NULL) { ++ return NULL; ++ } ++ ++ ScopedPyObjectPtr field_number_name(PyObject_CallMethod( ++ field_name_upcased, "__add__", "(O)", _NUMBER_string.get())); ++ if (field_number_name == NULL) { ++ return NULL; ++ } ++ ++ ScopedPyObjectPtr number(PyInt_FromLong( ++ cfield_descriptor->descriptor->number())); ++ if (number == NULL) { ++ return NULL; ++ } ++ if (PyObject_SetAttr(reinterpret_cast(cls), ++ field_number_name, number) == -1) { ++ return NULL; ++ } ++ } ++ ++ PyDict_SetItem(cls->tp_dict, k__descriptors, field_descriptors); ++ ++ // Enum Values ++ ScopedPyObjectPtr enum_types(PyObject_GetAttrString(descriptor, ++ "enum_types")); ++ if (enum_types == NULL) { ++ return NULL; ++ } ++ ScopedPyObjectPtr type_iter(PyObject_GetIter(enum_types)); ++ if (type_iter == NULL) { ++ return NULL; ++ } ++ ScopedPyObjectPtr enum_type; ++ while ((enum_type.reset(PyIter_Next(type_iter))) != NULL) { ++ ScopedPyObjectPtr wrapped(PyObject_CallFunctionObjArgs( ++ EnumTypeWrapper_class, enum_type.get(), NULL)); ++ if (wrapped == NULL) { ++ return NULL; ++ } ++ ScopedPyObjectPtr enum_name(PyObject_GetAttr(enum_type, kname)); ++ if (enum_name == NULL) { ++ return NULL; ++ } ++ if (PyObject_SetAttr(reinterpret_cast(cls), ++ enum_name, wrapped) == -1) { ++ return NULL; ++ } ++ ++ ScopedPyObjectPtr enum_values(PyObject_GetAttrString(enum_type, "values")); ++ if (enum_values == NULL) { ++ return NULL; ++ } ++ ScopedPyObjectPtr values_iter(PyObject_GetIter(enum_values)); ++ if (values_iter == NULL) { ++ return NULL; ++ } ++ ScopedPyObjectPtr enum_value; ++ while ((enum_value.reset(PyIter_Next(values_iter))) != NULL) { ++ ScopedPyObjectPtr value_name(PyObject_GetAttr(enum_value, kname)); ++ if (value_name == NULL) { ++ return NULL; ++ } ++ ScopedPyObjectPtr value_number(PyObject_GetAttrString(enum_value, ++ "number")); ++ if (value_number == NULL) { ++ return NULL; ++ } 
++ if (PyObject_SetAttr(reinterpret_cast(cls), ++ value_name, value_number) == -1) { ++ return NULL; ++ } ++ } ++ if (PyErr_Occurred()) { // If PyIter_Next failed ++ return NULL; ++ } ++ } ++ if (PyErr_Occurred()) { // If PyIter_Next failed ++ return NULL; ++ } ++ ++ ScopedPyObjectPtr extension_dict( ++ PyObject_GetAttr(descriptor, kextensions_by_name)); ++ if (extension_dict == NULL || !PyDict_Check(extension_dict)) { ++ PyErr_SetString(PyExc_TypeError, "extensions_by_name not a dict"); ++ return NULL; ++ } ++ Py_ssize_t pos = 0; ++ PyObject* extension_name; ++ PyObject* extension_field; ++ ++ while (PyDict_Next(extension_dict, &pos, &extension_name, &extension_field)) { ++ if (PyObject_SetAttr(reinterpret_cast(cls), ++ extension_name, extension_field) == -1) { ++ return NULL; ++ } ++ ScopedPyObjectPtr py_cfield_descriptor( ++ PyObject_GetAttrString(extension_field, "_cdescriptor")); ++ if (py_cfield_descriptor == NULL) { ++ return NULL; ++ } ++ CFieldDescriptor* cfield_descriptor = ++ reinterpret_cast(py_cfield_descriptor.get()); ++ Py_INCREF(extension_field); ++ cfield_descriptor->descriptor_field = extension_field; ++ ++ ScopedPyObjectPtr field_name_upcased( ++ PyObject_CallMethod(extension_name, "upper", NULL)); ++ if (field_name_upcased == NULL) { ++ return NULL; ++ } ++ ScopedPyObjectPtr field_number_name(PyObject_CallMethod( ++ field_name_upcased, "__add__", "(O)", _NUMBER_string.get())); ++ if (field_number_name == NULL) { ++ return NULL; ++ } ++ ScopedPyObjectPtr number(PyInt_FromLong( ++ cfield_descriptor->descriptor->number())); ++ if (number == NULL) { ++ return NULL; ++ } ++ if (PyObject_SetAttr(reinterpret_cast(cls), ++ field_number_name, PyInt_FromLong( ++ cfield_descriptor->descriptor->number())) == -1) { ++ return NULL; ++ } ++ } ++ ++ Py_RETURN_NONE; ++} ++ ++PyObject* DeepCopy(CMessage* self, PyObject* arg) { ++ PyObject* clone = PyObject_CallObject( ++ reinterpret_cast(Py_TYPE(self)), NULL); ++ if (clone == NULL) { ++ return NULL; ++ } ++ if 
(!PyObject_TypeCheck(clone, &CMessage_Type)) { ++ Py_DECREF(clone); ++ return NULL; ++ } ++ if (InitAttributes(reinterpret_cast(clone), NULL, NULL) < 0) { ++ Py_DECREF(clone); ++ return NULL; ++ } ++ if (MergeFrom(reinterpret_cast(clone), ++ reinterpret_cast(self)) == NULL) { ++ Py_DECREF(clone); ++ return NULL; ++ } ++ return clone; ++} ++ ++PyObject* ToUnicode(CMessage* self) { ++ // Lazy import to prevent circular dependencies ++ ScopedPyObjectPtr text_format( ++ PyImport_ImportModule("google.protobuf.text_format")); ++ if (text_format == NULL) { ++ return NULL; ++ } ++ ScopedPyObjectPtr method_name(PyString_FromString("MessageToString")); ++ if (method_name == NULL) { ++ return NULL; ++ } ++ Py_INCREF(Py_True); ++ ScopedPyObjectPtr encoded(PyObject_CallMethodObjArgs(text_format, method_name, ++ self, Py_True, NULL)); ++ Py_DECREF(Py_True); ++ if (encoded == NULL) { ++ return NULL; ++ } ++#if PY_MAJOR_VERSION < 3 ++ PyObject* decoded = PyString_AsDecodedObject(encoded, "utf-8", NULL); ++#else ++ PyObject* decoded = PyUnicode_FromEncodedObject(encoded, "utf-8", NULL); ++#endif ++ if (decoded == NULL) { ++ return NULL; ++ } ++ return decoded; ++} ++ ++PyObject* Reduce(CMessage* self) { ++ ScopedPyObjectPtr constructor(reinterpret_cast(Py_TYPE(self))); ++ constructor.inc(); ++ ScopedPyObjectPtr args(PyTuple_New(0)); ++ if (args == NULL) { ++ return NULL; ++ } ++ ScopedPyObjectPtr state(PyDict_New()); ++ if (state == NULL) { ++ return NULL; ++ } ++ ScopedPyObjectPtr serialized(SerializePartialToString(self)); ++ if (serialized == NULL) { ++ return NULL; ++ } ++ if (PyDict_SetItemString(state, "serialized", serialized) < 0) { ++ return NULL; ++ } ++ return Py_BuildValue("OOO", constructor.get(), args.get(), state.get()); ++} ++ ++PyObject* SetState(CMessage* self, PyObject* state) { ++ if (!PyDict_Check(state)) { ++ PyErr_SetString(PyExc_TypeError, "state not a dict"); ++ return NULL; ++ } ++ PyObject* serialized = PyDict_GetItemString(state, "serialized"); ++ if 
(serialized == NULL) { ++ return NULL; ++ } ++ if (ParseFromString(self, serialized) == NULL) { ++ return NULL; ++ } ++ Py_RETURN_NONE; ++} ++ ++// CMessage static methods: ++PyObject* _GetFieldDescriptor(PyObject* unused, PyObject* arg) { ++ return cdescriptor_pool::FindFieldByName(descriptor_pool, arg); ++} ++ ++PyObject* _GetExtensionDescriptor(PyObject* unused, PyObject* arg) { ++ return cdescriptor_pool::FindExtensionByName(descriptor_pool, arg); ++} ++ ++static PyMemberDef Members[] = { ++ {"Extensions", T_OBJECT_EX, offsetof(CMessage, extensions), 0, ++ "Extension dict"}, ++ {NULL} ++}; ++ ++static PyMethodDef Methods[] = { ++ { "__deepcopy__", (PyCFunction)DeepCopy, METH_VARARGS, ++ "Makes a deep copy of the class." }, ++ { "__reduce__", (PyCFunction)Reduce, METH_NOARGS, ++ "Outputs picklable representation of the message." }, ++ { "__setstate__", (PyCFunction)SetState, METH_O, ++ "Inputs picklable representation of the message." }, ++ { "__unicode__", (PyCFunction)ToUnicode, METH_NOARGS, ++ "Outputs a unicode representation of the message." }, ++ { "AddDescriptors", (PyCFunction)AddDescriptors, METH_O | METH_CLASS, ++ "Adds field descriptors to the class" }, ++ { "ByteSize", (PyCFunction)ByteSize, METH_NOARGS, ++ "Returns the size of the message in bytes." }, ++ { "Clear", (PyCFunction)Clear, METH_NOARGS, ++ "Clears the message." }, ++ { "ClearExtension", (PyCFunction)ClearExtension, METH_O, ++ "Clears a message field." }, ++ { "ClearField", (PyCFunction)ClearField, METH_O, ++ "Clears a message field." }, ++ { "CopyFrom", (PyCFunction)CopyFrom, METH_O, ++ "Copies a protocol message into the current message." }, ++ { "FindInitializationErrors", (PyCFunction)FindInitializationErrors, ++ METH_NOARGS, ++ "Finds unset required fields." }, ++ { "FromString", (PyCFunction)FromString, METH_O | METH_CLASS, ++ "Creates new method instance from given serialized data." }, ++ { "HasExtension", (PyCFunction)HasExtension, METH_O, ++ "Checks if a message field is set." 
}, ++ { "HasField", (PyCFunction)HasField, METH_O, ++ "Checks if a message field is set." }, ++ { "IsInitialized", (PyCFunction)IsInitialized, METH_VARARGS, ++ "Checks if all required fields of a protocol message are set." }, ++ { "ListFields", (PyCFunction)ListFields, METH_NOARGS, ++ "Lists all set fields of a message." }, ++ { "MergeFrom", (PyCFunction)MergeFrom, METH_O, ++ "Merges a protocol message into the current message." }, ++ { "MergeFromString", (PyCFunction)MergeFromString, METH_O, ++ "Merges a serialized message into the current message." }, ++ { "ParseFromString", (PyCFunction)ParseFromString, METH_O, ++ "Parses a serialized message into the current message." }, ++ { "RegisterExtension", (PyCFunction)RegisterExtension, METH_O | METH_CLASS, ++ "Registers an extension with the current message." }, ++ { "SerializePartialToString", (PyCFunction)SerializePartialToString, ++ METH_NOARGS, ++ "Serializes the message to a string, even if it isn't initialized." }, ++ { "SerializeToString", (PyCFunction)SerializeToString, METH_NOARGS, ++ "Serializes the message to a string, only for initialized messages." }, ++ { "SetInParent", (PyCFunction)SetInParent, METH_NOARGS, ++ "Sets the has bit of the given field in its parent message." }, ++ { "WhichOneof", (PyCFunction)WhichOneof, METH_O, ++ "Returns the name of the field set inside a oneof, " ++ "or None if no field is set." }, ++ ++ // Static Methods. ++ { "_BuildFile", (PyCFunction)Python_BuildFile, METH_O | METH_STATIC, ++ "Registers a new protocol buffer file in the global C++ descriptor pool." }, ++ { "_GetFieldDescriptor", (PyCFunction)_GetFieldDescriptor, ++ METH_O | METH_STATIC, "Finds a field descriptor in the message pool." }, ++ { "_GetExtensionDescriptor", (PyCFunction)_GetExtensionDescriptor, ++ METH_O | METH_STATIC, ++ "Finds a extension descriptor in the message pool." 
}, ++ { NULL, NULL} ++}; ++ ++PyObject* GetAttr(CMessage* self, PyObject* name) { ++ PyObject* value = PyDict_GetItem(self->composite_fields, name); ++ if (value != NULL) { ++ Py_INCREF(value); ++ return value; ++ } ++ ++ PyObject* descriptor = GetDescriptor(self, name); ++ if (descriptor != NULL) { ++ CFieldDescriptor* cdescriptor = ++ reinterpret_cast(descriptor); ++ const google::protobuf::FieldDescriptor* field_descriptor = cdescriptor->descriptor; ++ if (field_descriptor->label() == google::protobuf::FieldDescriptor::LABEL_REPEATED) { ++ if (field_descriptor->cpp_type() == ++ google::protobuf::FieldDescriptor::CPPTYPE_MESSAGE) { ++ PyObject* py_container = PyObject_CallObject( ++ reinterpret_cast(&RepeatedCompositeContainer_Type), ++ NULL); ++ if (py_container == NULL) { ++ return NULL; ++ } ++ RepeatedCompositeContainer* container = ++ reinterpret_cast(py_container); ++ PyObject* field = cdescriptor->descriptor_field; ++ PyObject* message_type = PyObject_GetAttr(field, kmessage_type); ++ if (message_type == NULL) { ++ return NULL; ++ } ++ PyObject* concrete_class = ++ PyObject_GetAttr(message_type, k_concrete_class); ++ if (concrete_class == NULL) { ++ return NULL; ++ } ++ container->parent = self; ++ container->parent_field = cdescriptor; ++ container->message = self->message; ++ container->owner = self->owner; ++ container->subclass_init = concrete_class; ++ Py_DECREF(message_type); ++ if (PyDict_SetItem(self->composite_fields, name, py_container) < 0) { ++ Py_DECREF(py_container); ++ return NULL; ++ } ++ return py_container; ++ } else { ++ ScopedPyObjectPtr init_args(PyTuple_Pack(2, self, cdescriptor)); ++ PyObject* py_container = PyObject_CallObject( ++ reinterpret_cast(&RepeatedScalarContainer_Type), ++ init_args); ++ if (py_container == NULL) { ++ return NULL; ++ } ++ if (PyDict_SetItem(self->composite_fields, name, py_container) < 0) { ++ Py_DECREF(py_container); ++ return NULL; ++ } ++ return py_container; ++ } ++ } else { ++ if 
(field_descriptor->cpp_type() == ++ google::protobuf::FieldDescriptor::CPPTYPE_MESSAGE) { ++ PyObject* sub_message = InternalGetSubMessage(self, cdescriptor); ++ if (PyDict_SetItem(self->composite_fields, name, sub_message) < 0) { ++ Py_DECREF(sub_message); ++ return NULL; ++ } ++ return sub_message; ++ } else { ++ return InternalGetScalar(self, field_descriptor); ++ } ++ } ++ } ++ ++ return CMessage_Type.tp_base->tp_getattro(reinterpret_cast(self), ++ name); ++} ++ ++int SetAttr(CMessage* self, PyObject* name, PyObject* value) { ++ if (PyDict_Contains(self->composite_fields, name)) { ++ PyErr_SetString(PyExc_TypeError, "Can't set composite field"); ++ return -1; ++ } ++ ++ PyObject* descriptor = GetDescriptor(self, name); ++ if (descriptor != NULL) { ++ AssureWritable(self); ++ CFieldDescriptor* cdescriptor = ++ reinterpret_cast(descriptor); ++ const google::protobuf::FieldDescriptor* field_descriptor = cdescriptor->descriptor; ++ if (field_descriptor->label() == google::protobuf::FieldDescriptor::LABEL_REPEATED) { ++ PyErr_Format(PyExc_AttributeError, "Assignment not allowed to repeated " ++ "field \"%s\" in protocol message object.", ++ field_descriptor->name().c_str()); ++ return -1; ++ } else { ++ if (field_descriptor->cpp_type() == ++ google::protobuf::FieldDescriptor::CPPTYPE_MESSAGE) { ++ PyErr_Format(PyExc_AttributeError, "Assignment not allowed to " ++ "field \"%s\" in protocol message object.", ++ field_descriptor->name().c_str()); ++ return -1; ++ } else { ++ return InternalSetScalar(self, field_descriptor, value); ++ } ++ } ++ } ++ ++ PyErr_Format(PyExc_AttributeError, "Assignment not allowed"); ++ return -1; ++} ++ ++} // namespace cmessage ++ ++PyTypeObject CMessage_Type = { ++ PyVarObject_HEAD_INIT(&PyType_Type, 0) ++ "google.protobuf.internal." 
++ "cpp._message.CMessage", // tp_name ++ sizeof(CMessage), // tp_basicsize ++ 0, // tp_itemsize ++ (destructor)cmessage::Dealloc, // tp_dealloc ++ 0, // tp_print ++ 0, // tp_getattr ++ 0, // tp_setattr ++ 0, // tp_compare ++ 0, // tp_repr ++ 0, // tp_as_number ++ 0, // tp_as_sequence ++ 0, // tp_as_mapping ++ 0, // tp_hash ++ 0, // tp_call ++ (reprfunc)cmessage::ToStr, // tp_str ++ (getattrofunc)cmessage::GetAttr, // tp_getattro ++ (setattrofunc)cmessage::SetAttr, // tp_setattro ++ 0, // tp_as_buffer ++ Py_TPFLAGS_DEFAULT | Py_TPFLAGS_BASETYPE, // tp_flags ++ "A ProtocolMessage", // tp_doc ++ 0, // tp_traverse ++ 0, // tp_clear ++ (richcmpfunc)cmessage::RichCompare, // tp_richcompare ++ 0, // tp_weaklistoffset ++ 0, // tp_iter ++ 0, // tp_iternext ++ cmessage::Methods, // tp_methods ++ cmessage::Members, // tp_members ++ 0, // tp_getset ++ 0, // tp_base ++ 0, // tp_dict ++ 0, // tp_descr_get ++ 0, // tp_descr_set ++ 0, // tp_dictoffset ++ (initproc)cmessage::Init, // tp_init ++ 0, // tp_alloc ++ cmessage::New, // tp_new ++}; ++ ++// --- Exposing the C proto living inside Python proto to C code: ++ ++const Message* (*GetCProtoInsidePyProtoPtr)(PyObject* msg); ++Message* (*MutableCProtoInsidePyProtoPtr)(PyObject* msg); ++ ++static const google::protobuf::Message* GetCProtoInsidePyProtoImpl(PyObject* msg) { ++ if (!PyObject_TypeCheck(msg, &CMessage_Type)) { ++ return NULL; ++ } ++ CMessage* cmsg = reinterpret_cast(msg); ++ return cmsg->message; ++} ++ ++static google::protobuf::Message* MutableCProtoInsidePyProtoImpl(PyObject* msg) { ++ if (!PyObject_TypeCheck(msg, &CMessage_Type)) { ++ return NULL; ++ } ++ CMessage* cmsg = reinterpret_cast(msg); ++ if (PyDict_Size(cmsg->composite_fields) != 0 || ++ (cmsg->extensions != NULL && ++ PyDict_Size(cmsg->extensions->values) != 0)) { ++ // There is currently no way of accurately syncing arbitrary changes to ++ // the underlying C++ message back to the CMessage (e.g. removed repeated ++ // composite containers). 
We only allow direct mutation of the underlying ++ // C++ message if there is no child data in the CMessage. ++ return NULL; ++ } ++ cmessage::AssureWritable(cmsg); ++ return cmsg->message; ++} ++ ++static const char module_docstring[] = ++"python-proto2 is a module that can be used to enhance proto2 Python API\n" ++"performance.\n" ++"\n" ++"It provides access to the protocol buffers C++ reflection API that\n" ++"implements the basic protocol buffer functions."; ++ ++void InitGlobals() { ++ // TODO(gps): Check all return values in this function for NULL and propagate ++ // the error (MemoryError) on up to result in an import failure. These should ++ // also be freed and reset to NULL during finalization. ++ kPythonZero = PyInt_FromLong(0); ++ kint32min_py = PyInt_FromLong(kint32min); ++ kint32max_py = PyInt_FromLong(kint32max); ++ kuint32max_py = PyLong_FromLongLong(kuint32max); ++ kint64min_py = PyLong_FromLongLong(kint64min); ++ kint64max_py = PyLong_FromLongLong(kint64max); ++ kuint64max_py = PyLong_FromUnsignedLongLong(kuint64max); ++ ++ kDESCRIPTOR = PyString_FromString("DESCRIPTOR"); ++ k__descriptors = PyString_FromString("__descriptors"); ++ kfull_name = PyString_FromString("full_name"); ++ kis_extendable = PyString_FromString("is_extendable"); ++ kextensions_by_name = PyString_FromString("extensions_by_name"); ++ k_extensions_by_name = PyString_FromString("_extensions_by_name"); ++ k_extensions_by_number = PyString_FromString("_extensions_by_number"); ++ k_concrete_class = PyString_FromString("_concrete_class"); ++ kmessage_type = PyString_FromString("message_type"); ++ kname = PyString_FromString("name"); ++ kfields_by_name = PyString_FromString("fields_by_name"); ++ ++ global_message_factory = new DynamicMessageFactory(GetDescriptorPool()); ++ global_message_factory->SetDelegateToGeneratedFactory(true); ++ ++ descriptor_pool = reinterpret_cast( ++ Python_NewCDescriptorPool(NULL, NULL)); ++} ++ ++bool InitProto2MessageModule(PyObject *m) { ++ 
InitGlobals(); ++ ++ google::protobuf::python::CMessage_Type.tp_hash = PyObject_HashNotImplemented; ++ if (PyType_Ready(&google::protobuf::python::CMessage_Type) < 0) { ++ return false; ++ } ++ ++ // All three of these are actually set elsewhere, directly onto the child ++ // protocol buffer message class, but set them here as well to document that ++ // subclasses need to set these. ++ PyDict_SetItem(google::protobuf::python::CMessage_Type.tp_dict, kDESCRIPTOR, Py_None); ++ PyDict_SetItem(google::protobuf::python::CMessage_Type.tp_dict, ++ k_extensions_by_name, Py_None); ++ PyDict_SetItem(google::protobuf::python::CMessage_Type.tp_dict, ++ k_extensions_by_number, Py_None); ++ ++ PyModule_AddObject(m, "Message", reinterpret_cast( ++ &google::protobuf::python::CMessage_Type)); ++ ++ google::protobuf::python::RepeatedScalarContainer_Type.tp_new = PyType_GenericNew; ++ google::protobuf::python::RepeatedScalarContainer_Type.tp_hash = ++ PyObject_HashNotImplemented; ++ if (PyType_Ready(&google::protobuf::python::RepeatedScalarContainer_Type) < 0) { ++ return false; ++ } ++ ++ PyModule_AddObject(m, "RepeatedScalarContainer", ++ reinterpret_cast( ++ &google::protobuf::python::RepeatedScalarContainer_Type)); ++ ++ google::protobuf::python::RepeatedCompositeContainer_Type.tp_new = PyType_GenericNew; ++ google::protobuf::python::RepeatedCompositeContainer_Type.tp_hash = ++ PyObject_HashNotImplemented; ++ if (PyType_Ready(&google::protobuf::python::RepeatedCompositeContainer_Type) < 0) { ++ return false; ++ } ++ ++ PyModule_AddObject( ++ m, "RepeatedCompositeContainer", ++ reinterpret_cast( ++ &google::protobuf::python::RepeatedCompositeContainer_Type)); ++ ++ google::protobuf::python::ExtensionDict_Type.tp_new = PyType_GenericNew; ++ google::protobuf::python::ExtensionDict_Type.tp_hash = PyObject_HashNotImplemented; ++ if (PyType_Ready(&google::protobuf::python::ExtensionDict_Type) < 0) { ++ return false; ++ } ++ ++ PyModule_AddObject( ++ m, "ExtensionDict", ++ 
reinterpret_cast(&google::protobuf::python::ExtensionDict_Type)); ++ ++ if (!google::protobuf::python::InitDescriptor()) { ++ return false; ++ } ++ ++ PyObject* enum_type_wrapper = PyImport_ImportModule( ++ "google.protobuf.internal.enum_type_wrapper"); ++ if (enum_type_wrapper == NULL) { ++ return false; ++ } ++ google::protobuf::python::EnumTypeWrapper_class = ++ PyObject_GetAttrString(enum_type_wrapper, "EnumTypeWrapper"); ++ Py_DECREF(enum_type_wrapper); ++ ++ PyObject* message_module = PyImport_ImportModule( ++ "google.protobuf.message"); ++ if (message_module == NULL) { ++ return false; ++ } ++ google::protobuf::python::EncodeError_class = PyObject_GetAttrString(message_module, ++ "EncodeError"); ++ google::protobuf::python::DecodeError_class = PyObject_GetAttrString(message_module, ++ "DecodeError"); ++ Py_DECREF(message_module); ++ ++ PyObject* pickle_module = PyImport_ImportModule("pickle"); ++ if (pickle_module == NULL) { ++ return false; ++ } ++ google::protobuf::python::PickleError_class = PyObject_GetAttrString(pickle_module, ++ "PickleError"); ++ Py_DECREF(pickle_module); ++ ++ // Override {Get,Mutable}CProtoInsidePyProto. 
++ google::protobuf::python::GetCProtoInsidePyProtoPtr = ++ google::protobuf::python::GetCProtoInsidePyProtoImpl; ++ google::protobuf::python::MutableCProtoInsidePyProtoPtr = ++ google::protobuf::python::MutableCProtoInsidePyProtoImpl; ++ ++ return true; ++} ++ ++} // namespace python ++} // namespace protobuf ++ ++ ++#if PY_MAJOR_VERSION >= 3 ++static struct PyModuleDef _module = { ++ PyModuleDef_HEAD_INIT, ++ "_message", ++ google::protobuf::python::module_docstring, ++ -1, ++ NULL, ++ NULL, ++ NULL, ++ NULL, ++ NULL ++}; ++#define INITFUNC PyInit__message ++#define INITFUNC_ERRORVAL NULL ++#else // Python 2 ++#define INITFUNC init_message ++#define INITFUNC_ERRORVAL ++#endif ++ ++extern "C" { ++ PyMODINIT_FUNC INITFUNC(void) { ++ PyObject* m; ++#if PY_MAJOR_VERSION >= 3 ++ m = PyModule_Create(&_module); ++#else ++ m = Py_InitModule3("_message", NULL, google::protobuf::python::module_docstring); ++#endif ++ if (m == NULL) { ++ return INITFUNC_ERRORVAL; ++ } ++ ++ if (!google::protobuf::python::InitProto2MessageModule(m)) { ++ Py_DECREF(m); ++ return INITFUNC_ERRORVAL; ++ } ++ ++#if PY_MAJOR_VERSION >= 3 ++ return m; ++#endif ++ } ++} ++} // namespace google +--- /dev/null ++++ protobuf-2.6.1/python3/google/protobuf/pyext/message.h +@@ -0,0 +1,305 @@ ++// Protocol Buffers - Google's data interchange format ++// Copyright 2008 Google Inc. All rights reserved. ++// https://developers.google.com/protocol-buffers/ ++// ++// Redistribution and use in source and binary forms, with or without ++// modification, are permitted provided that the following conditions are ++// met: ++// ++// * Redistributions of source code must retain the above copyright ++// notice, this list of conditions and the following disclaimer. ++// * Redistributions in binary form must reproduce the above ++// copyright notice, this list of conditions and the following disclaimer ++// in the documentation and/or other materials provided with the ++// distribution. 
++// * Neither the name of Google Inc. nor the names of its ++// contributors may be used to endorse or promote products derived from ++// this software without specific prior written permission. ++// ++// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS ++// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT ++// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR ++// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT ++// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, ++// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT ++// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, ++// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY ++// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT ++// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE ++// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. ++ ++// Author: anuraag@google.com (Anuraag Agrawal) ++// Author: tibell@google.com (Johan Tibell) ++ ++#ifndef GOOGLE_PROTOBUF_PYTHON_CPP_MESSAGE_H__ ++#define GOOGLE_PROTOBUF_PYTHON_CPP_MESSAGE_H__ ++ ++#include ++ ++#include ++#ifndef _SHARED_PTR_H ++#include ++#endif ++#include ++ ++ ++namespace google { ++namespace protobuf { ++ ++class Message; ++class Reflection; ++class FieldDescriptor; ++ ++using internal::shared_ptr; ++ ++namespace python { ++ ++struct CFieldDescriptor; ++struct ExtensionDict; ++ ++typedef struct CMessage { ++ PyObject_HEAD; ++ ++ // This is the top-level C++ Message object that owns the whole ++ // proto tree. Every Python CMessage holds a reference to it in ++ // order to keep it alive as long as there's a Python object that ++ // references any part of the tree. ++ shared_ptr owner; ++ ++ // Weak reference to a parent CMessage object. This is NULL for any top-level ++ // message and is set for any child message (i.e. 
a child submessage or a ++ // part of a repeated composite field). ++ // ++ // Used to make sure all ancestors are also mutable when first modifying ++ // a child submessage (in other words, turning a default message instance ++ // into a mutable one). ++ // ++ // If a submessage is released (becomes a new top-level message), this field ++ // MUST be set to NULL. The parent may get deallocated and further attempts ++ // to use this pointer will result in a crash. ++ struct CMessage* parent; ++ ++ // Weak reference to the parent's descriptor that describes this submessage. ++ // Used together with the parent's message when making a default message ++ // instance mutable. ++ // TODO(anuraag): With a bit of work on the Python/C++ layer, it should be ++ // possible to make this a direct pointer to a C++ FieldDescriptor, this would ++ // be easier if this implementation replaces upstream. ++ CFieldDescriptor* parent_field; ++ ++ // Pointer to the C++ Message object for this CMessage. The ++ // CMessage does not own this pointer. ++ Message* message; ++ ++ // Indicates this submessage is pointing to a default instance of a message. ++ // Submessages are always first created as read only messages and are then ++ // made writable, at which point this field is set to false. ++ bool read_only; ++ ++ // A reference to a Python dictionary containing CMessage, ++ // RepeatedCompositeContainer, and RepeatedScalarContainer ++ // objects. Used as a cache to make sure we don't have to make a ++ // Python wrapper for the C++ Message objects on every access, or ++ // deal with the synchronization nightmare that could create. ++ PyObject* composite_fields; ++ ++ // A reference to the dictionary containing the message's extensions. ++ // Similar to composite_fields, acting as a cache, but also contains the ++ // required extension dict logic. 
++ ExtensionDict* extensions; ++} CMessage; ++ ++extern PyTypeObject CMessage_Type; ++ ++namespace cmessage { ++ ++// Create a new empty message that can be populated by the parent. ++PyObject* NewEmpty(PyObject* type); ++ ++// Release a submessage from its proto tree, making it a new top-level message. ++// A new message will be created if this is a read-only default instance. ++// ++// Corresponds to reflection api method ReleaseMessage. ++int ReleaseSubMessage(google::protobuf::Message* message, ++ const google::protobuf::FieldDescriptor* field_descriptor, ++ CMessage* child_cmessage); ++ ++// Initializes a new CMessage instance for a submessage. Only called once per ++// submessage as the result is cached in composite_fields. ++// ++// Corresponds to reflection api method GetMessage. ++PyObject* InternalGetSubMessage(CMessage* self, ++ CFieldDescriptor* cfield_descriptor); ++ ++// Deletes a range of C++ submessages in a repeated field (following a ++// removal in a RepeatedCompositeContainer). ++// ++// Releases messages to the provided cmessage_list if it is not NULL rather ++// than just removing them from the underlying proto. This cmessage_list must ++// have a CMessage for each underlying submessage. The CMessages referred to ++// by slice will be removed from cmessage_list by this function. ++// ++// Corresponds to reflection api method RemoveLast. ++int InternalDeleteRepeatedField(google::protobuf::Message* message, ++ const google::protobuf::FieldDescriptor* field_descriptor, ++ PyObject* slice, PyObject* cmessage_list); ++ ++// Sets the specified scalar value to the message. ++int InternalSetScalar(CMessage* self, ++ const google::protobuf::FieldDescriptor* field_descriptor, ++ PyObject* value); ++ ++// Retrieves the specified scalar value from the message. ++// ++// Returns a new python reference. 
++PyObject* InternalGetScalar(CMessage* self, ++ const google::protobuf::FieldDescriptor* field_descriptor); ++ ++// Clears the message, removing all contained data. Extension dictionary and ++// submessages are released first if there are remaining external references. ++// ++// Corresponds to message api method Clear. ++PyObject* Clear(CMessage* self); ++ ++// Clears the data described by the given descriptor. Used to clear extensions ++// (which don't have names). Extension release is handled by ExtensionDict ++// class, not this function. ++// TODO(anuraag): Try to make this discrepancy in release semantics with ++// ClearField less confusing. ++// ++// Corresponds to reflection api method ClearField. ++PyObject* ClearFieldByDescriptor( ++ CMessage* self, ++ const google::protobuf::FieldDescriptor* descriptor); ++ ++// Clears the data for the given field name. The message is released if there ++// are any external references. ++// ++// Corresponds to reflection api method ClearField. ++PyObject* ClearField(CMessage* self, PyObject* arg); ++ ++// Checks if the message has the field described by the descriptor. Used for ++// extensions (which have no name). ++// ++// Corresponds to reflection api method HasField ++PyObject* HasFieldByDescriptor( ++ CMessage* self, const google::protobuf::FieldDescriptor* field_descriptor); ++ ++// Checks if the message has the named field. ++// ++// Corresponds to reflection api method HasField. ++PyObject* HasField(CMessage* self, PyObject* arg); ++ ++// Initializes constants/enum values on a message. This is called by ++// RepeatedCompositeContainer and ExtensionDict after calling the constructor. ++// TODO(anuraag): Make it always called from within the constructor since it can ++int InitAttributes(CMessage* self, PyObject* descriptor, PyObject* kwargs); ++ ++PyObject* MergeFrom(CMessage* self, PyObject* arg); ++ ++// Retrieves an attribute named 'name' from CMessage 'self'. 
Returns ++// the attribute value on success, or NULL on failure. ++// ++// Returns a new reference. ++PyObject* GetAttr(CMessage* self, PyObject* name); ++ ++// Set the value of the attribute named 'name', for CMessage 'self', ++// to the value 'value'. Returns -1 on failure. ++int SetAttr(CMessage* self, PyObject* name, PyObject* value); ++ ++PyObject* FindInitializationErrors(CMessage* self); ++ ++// Set the owner field of self and any children of self, recursively. ++// Used when self is being released and thus has a new owner (the ++// released Message.) ++int SetOwner(CMessage* self, const shared_ptr& new_owner); ++ ++int AssureWritable(CMessage* self); ++ ++} // namespace cmessage ++ ++/* Is 64bit */ ++#define IS_64BIT (SIZEOF_LONG == 8) ++ ++#define FIELD_BELONGS_TO_MESSAGE(field_descriptor, message) \ ++ ((message)->GetDescriptor() == (field_descriptor)->containing_type()) ++ ++#define FIELD_IS_REPEATED(field_descriptor) \ ++ ((field_descriptor)->label() == google::protobuf::FieldDescriptor::LABEL_REPEATED) ++ ++#define GOOGLE_CHECK_GET_INT32(arg, value, err) \ ++ int32 value; \ ++ if (!CheckAndGetInteger(arg, &value, kint32min_py, kint32max_py)) { \ ++ return err; \ ++ } ++ ++#define GOOGLE_CHECK_GET_INT64(arg, value, err) \ ++ int64 value; \ ++ if (!CheckAndGetInteger(arg, &value, kint64min_py, kint64max_py)) { \ ++ return err; \ ++ } ++ ++#define GOOGLE_CHECK_GET_UINT32(arg, value, err) \ ++ uint32 value; \ ++ if (!CheckAndGetInteger(arg, &value, kPythonZero, kuint32max_py)) { \ ++ return err; \ ++ } ++ ++#define GOOGLE_CHECK_GET_UINT64(arg, value, err) \ ++ uint64 value; \ ++ if (!CheckAndGetInteger(arg, &value, kPythonZero, kuint64max_py)) { \ ++ return err; \ ++ } ++ ++#define GOOGLE_CHECK_GET_FLOAT(arg, value, err) \ ++ float value; \ ++ if (!CheckAndGetFloat(arg, &value)) { \ ++ return err; \ ++ } \ ++ ++#define GOOGLE_CHECK_GET_DOUBLE(arg, value, err) \ ++ double value; \ ++ if (!CheckAndGetDouble(arg, &value)) { \ ++ return err; \ ++ } ++ 
++#define GOOGLE_CHECK_GET_BOOL(arg, value, err) \ ++ bool value; \ ++ if (!CheckAndGetBool(arg, &value)) { \ ++ return err; \ ++ } ++ ++ ++extern PyObject* kPythonZero; ++extern PyObject* kint32min_py; ++extern PyObject* kint32max_py; ++extern PyObject* kuint32max_py; ++extern PyObject* kint64min_py; ++extern PyObject* kint64max_py; ++extern PyObject* kuint64max_py; ++ ++#define C(str) const_cast(str) ++ ++void FormatTypeError(PyObject* arg, char* expected_types); ++template ++bool CheckAndGetInteger( ++ PyObject* arg, T* value, PyObject* min, PyObject* max); ++bool CheckAndGetDouble(PyObject* arg, double* value); ++bool CheckAndGetFloat(PyObject* arg, float* value); ++bool CheckAndGetBool(PyObject* arg, bool* value); ++bool CheckAndSetString( ++ PyObject* arg, google::protobuf::Message* message, ++ const google::protobuf::FieldDescriptor* descriptor, ++ const google::protobuf::Reflection* reflection, ++ bool append, ++ int index); ++PyObject* ToStringObject( ++ const google::protobuf::FieldDescriptor* descriptor, string value); ++ ++extern PyObject* PickleError_class; ++ ++} // namespace python ++} // namespace protobuf ++ ++} // namespace google ++#endif // GOOGLE_PROTOBUF_PYTHON_CPP_MESSAGE_H__ +--- /dev/null ++++ protobuf-2.6.1/python3/google/protobuf/pyext/message_factory_cpp2_test.py +@@ -0,0 +1,56 @@ ++#! /usr/bin/python ++# ++# Protocol Buffers - Google's data interchange format ++# Copyright 2008 Google Inc. All rights reserved. ++# https://developers.google.com/protocol-buffers/ ++# ++# Redistribution and use in source and binary forms, with or without ++# modification, are permitted provided that the following conditions are ++# met: ++# ++# * Redistributions of source code must retain the above copyright ++# notice, this list of conditions and the following disclaimer. 
++# * Redistributions in binary form must reproduce the above ++# copyright notice, this list of conditions and the following disclaimer ++# in the documentation and/or other materials provided with the ++# distribution. ++# * Neither the name of Google Inc. nor the names of its ++# contributors may be used to endorse or promote products derived from ++# this software without specific prior written permission. ++# ++# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS ++# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT ++# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR ++# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT ++# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, ++# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT ++# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, ++# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY ++# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT ++# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE ++# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. ++ ++"""Tests for google.protobuf.message_factory.""" ++ ++import os ++os.environ['PROTOCOL_BUFFERS_PYTHON_IMPLEMENTATION'] = 'cpp' ++os.environ['PROTOCOL_BUFFERS_PYTHON_IMPLEMENTATION_VERSION'] = '2' ++ ++# We must set the implementation version above before the google3 imports. ++# pylint: disable=g-import-not-at-top ++from google.apputils import basetest ++from google.protobuf.internal import api_implementation ++# Run all tests from the original module by putting them in our namespace. 
++# pylint: disable=wildcard-import ++from google.protobuf.internal.message_factory_test import * ++ ++ ++class ConfirmCppApi2Test(basetest.TestCase): ++ ++ def testImplementationSetting(self): ++ self.assertEqual('cpp', api_implementation.Type()) ++ self.assertEqual(2, api_implementation.Version()) ++ ++ ++if __name__ == '__main__': ++ basetest.main() +--- /dev/null ++++ protobuf-2.6.1/python3/google/protobuf/pyext/proto2_api_test.proto +@@ -0,0 +1,38 @@ ++// Protocol Buffers - Google's data interchange format ++// Copyright 2008 Google Inc. All rights reserved. ++// https://developers.google.com/protocol-buffers/ ++// ++// Redistribution and use in source and binary forms, with or without ++// modification, are permitted provided that the following conditions are ++// met: ++// ++// * Redistributions of source code must retain the above copyright ++// notice, this list of conditions and the following disclaimer. ++// * Redistributions in binary form must reproduce the above ++// copyright notice, this list of conditions and the following disclaimer ++// in the documentation and/or other materials provided with the ++// distribution. ++// * Neither the name of Google Inc. nor the names of its ++// contributors may be used to endorse or promote products derived from ++// this software without specific prior written permission. ++// ++// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS ++// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT ++// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR ++// A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT ++// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, ++// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT ++// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, ++// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY ++// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT ++// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE ++// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. ++ ++import "google/protobuf/internal/cpp/proto1_api_test.proto"; ++ ++package google.protobuf.python.internal; ++ ++message TestNestedProto1APIMessage { ++ optional int32 a = 1; ++ optional TestMessage.NestedMessage b = 2; ++} +--- /dev/null ++++ protobuf-2.6.1/python3/google/protobuf/pyext/python.proto +@@ -0,0 +1,66 @@ ++// Protocol Buffers - Google's data interchange format ++// Copyright 2008 Google Inc. All rights reserved. ++// https://developers.google.com/protocol-buffers/ ++// ++// Redistribution and use in source and binary forms, with or without ++// modification, are permitted provided that the following conditions are ++// met: ++// ++// * Redistributions of source code must retain the above copyright ++// notice, this list of conditions and the following disclaimer. ++// * Redistributions in binary form must reproduce the above ++// copyright notice, this list of conditions and the following disclaimer ++// in the documentation and/or other materials provided with the ++// distribution. ++// * Neither the name of Google Inc. nor the names of its ++// contributors may be used to endorse or promote products derived from ++// this software without specific prior written permission. 
++// ++// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS ++// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT ++// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR ++// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT ++// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, ++// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT ++// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, ++// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY ++// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT ++// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE ++// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. ++ ++// Author: tibell@google.com (Johan Tibell) ++// ++// These message definitions are used to exercises known corner cases ++// in the C++ implementation of the Python API. ++ ++ ++package google.protobuf.python.internal; ++ ++// Protos optimized for SPEED use a strict superset of the generated code ++// of equivalent ones optimized for CODE_SIZE, so we should optimize all our ++// tests for speed unless explicitly testing code size optimization. 
++option optimize_for = SPEED; ++ ++message TestAllTypes { ++ message NestedMessage { ++ optional int32 bb = 1; ++ optional ForeignMessage cc = 2; ++ } ++ ++ repeated NestedMessage repeated_nested_message = 1; ++ optional NestedMessage optional_nested_message = 2; ++ optional int32 optional_int32 = 3; ++} ++ ++message ForeignMessage { ++ optional int32 c = 1; ++ repeated int32 d = 2; ++} ++ ++message TestAllExtensions { ++ extensions 1 to max; ++} ++ ++extend TestAllExtensions { ++ optional TestAllTypes.NestedMessage optional_nested_message_extension = 1; ++} +--- /dev/null ++++ protobuf-2.6.1/python3/google/protobuf/pyext/python_protobuf.h +@@ -0,0 +1,57 @@ ++// Protocol Buffers - Google's data interchange format ++// Copyright 2008 Google Inc. All rights reserved. ++// https://developers.google.com/protocol-buffers/ ++// ++// Redistribution and use in source and binary forms, with or without ++// modification, are permitted provided that the following conditions are ++// met: ++// ++// * Redistributions of source code must retain the above copyright ++// notice, this list of conditions and the following disclaimer. ++// * Redistributions in binary form must reproduce the above ++// copyright notice, this list of conditions and the following disclaimer ++// in the documentation and/or other materials provided with the ++// distribution. ++// * Neither the name of Google Inc. nor the names of its ++// contributors may be used to endorse or promote products derived from ++// this software without specific prior written permission. ++// ++// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS ++// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT ++// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR ++// A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT ++// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, ++// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT ++// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, ++// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY ++// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT ++// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE ++// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. ++ ++// Author: qrczak@google.com (Marcin Kowalczyk) ++// ++// This module exposes the C proto inside the given Python proto, in ++// case the Python proto is implemented with a C proto. ++ ++#ifndef GOOGLE_PROTOBUF_PYTHON_PYTHON_PROTOBUF_H__ ++#define GOOGLE_PROTOBUF_PYTHON_PYTHON_PROTOBUF_H__ ++ ++#include ++ ++namespace google { ++namespace protobuf { ++ ++class Message; ++ ++namespace python { ++ ++// Return the pointer to the C proto inside the given Python proto, ++// or NULL when this is not a Python proto implemented with a C proto. ++const Message* GetCProtoInsidePyProto(PyObject* msg); ++Message* MutableCProtoInsidePyProto(PyObject* msg); ++ ++} // namespace python ++} // namespace protobuf ++ ++} // namespace google ++#endif // GOOGLE_PROTOBUF_PYTHON_PYTHON_PROTOBUF_H__ +--- /dev/null ++++ protobuf-2.6.1/python3/google/protobuf/pyext/reflection_cpp2_generated_test.py +@@ -0,0 +1,94 @@ ++#! /usr/bin/python ++# -*- coding: utf-8 -*- ++# ++# Protocol Buffers - Google's data interchange format ++# Copyright 2008 Google Inc. All rights reserved. ++# https://developers.google.com/protocol-buffers/ ++# ++# Redistribution and use in source and binary forms, with or without ++# modification, are permitted provided that the following conditions are ++# met: ++# ++# * Redistributions of source code must retain the above copyright ++# notice, this list of conditions and the following disclaimer. 
++# * Redistributions in binary form must reproduce the above ++# copyright notice, this list of conditions and the following disclaimer ++# in the documentation and/or other materials provided with the ++# distribution. ++# * Neither the name of Google Inc. nor the names of its ++# contributors may be used to endorse or promote products derived from ++# this software without specific prior written permission. ++# ++# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS ++# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT ++# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR ++# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT ++# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, ++# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT ++# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, ++# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY ++# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT ++# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE ++# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
++ ++"""Unittest for reflection.py, which tests the generated C++ implementation.""" ++ ++__author__ = 'jasonh@google.com (Jason Hsueh)' ++ ++import os ++os.environ['PROTOCOL_BUFFERS_PYTHON_IMPLEMENTATION'] = 'cpp' ++os.environ['PROTOCOL_BUFFERS_PYTHON_IMPLEMENTATION_VERSION'] = '2' ++ ++from google.apputils import basetest ++from google.protobuf.internal import api_implementation ++from google.protobuf.internal import more_extensions_dynamic_pb2 ++from google.protobuf.internal import more_extensions_pb2 ++from google.protobuf.internal.reflection_test import * ++ ++ ++class ReflectionCppTest(basetest.TestCase): ++ def testImplementationSetting(self): ++ self.assertEqual('cpp', api_implementation.Type()) ++ self.assertEqual(2, api_implementation.Version()) ++ ++ def testExtensionOfGeneratedTypeInDynamicFile(self): ++ """Tests that a file built dynamically can extend a generated C++ type. ++ ++ The C++ implementation uses a DescriptorPool that has the generated ++ DescriptorPool as an underlay. Typically, a type can only find ++ extensions in its own pool. With the python C-extension, the generated C++ ++ extendee may be available, but not the extension. This tests that the ++ C-extension implements the correct special handling to make such extensions ++ available. ++ """ ++ pb1 = more_extensions_pb2.ExtendedMessage() ++ # Test that basic accessors work. ++ self.assertFalse( ++ pb1.HasExtension(more_extensions_dynamic_pb2.dynamic_int32_extension)) ++ self.assertFalse( ++ pb1.HasExtension(more_extensions_dynamic_pb2.dynamic_message_extension)) ++ pb1.Extensions[more_extensions_dynamic_pb2.dynamic_int32_extension] = 17 ++ pb1.Extensions[more_extensions_dynamic_pb2.dynamic_message_extension].a = 24 ++ self.assertTrue( ++ pb1.HasExtension(more_extensions_dynamic_pb2.dynamic_int32_extension)) ++ self.assertTrue( ++ pb1.HasExtension(more_extensions_dynamic_pb2.dynamic_message_extension)) ++ ++ # Now serialize the data and parse to a new message. 
++ pb2 = more_extensions_pb2.ExtendedMessage() ++ pb2.MergeFromString(pb1.SerializeToString()) ++ ++ self.assertTrue( ++ pb2.HasExtension(more_extensions_dynamic_pb2.dynamic_int32_extension)) ++ self.assertTrue( ++ pb2.HasExtension(more_extensions_dynamic_pb2.dynamic_message_extension)) ++ self.assertEqual( ++ 17, pb2.Extensions[more_extensions_dynamic_pb2.dynamic_int32_extension]) ++ self.assertEqual( ++ 24, ++ pb2.Extensions[more_extensions_dynamic_pb2.dynamic_message_extension].a) ++ ++ ++ ++if __name__ == '__main__': ++ basetest.main() +--- /dev/null ++++ protobuf-2.6.1/python3/google/protobuf/pyext/repeated_composite_container.cc +@@ -0,0 +1,763 @@ ++// Protocol Buffers - Google's data interchange format ++// Copyright 2008 Google Inc. All rights reserved. ++// https://developers.google.com/protocol-buffers/ ++// ++// Redistribution and use in source and binary forms, with or without ++// modification, are permitted provided that the following conditions are ++// met: ++// ++// * Redistributions of source code must retain the above copyright ++// notice, this list of conditions and the following disclaimer. ++// * Redistributions in binary form must reproduce the above ++// copyright notice, this list of conditions and the following disclaimer ++// in the documentation and/or other materials provided with the ++// distribution. ++// * Neither the name of Google Inc. nor the names of its ++// contributors may be used to endorse or promote products derived from ++// this software without specific prior written permission. ++// ++// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS ++// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT ++// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR ++// A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT ++// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, ++// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT ++// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, ++// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY ++// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT ++// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE ++// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. ++ ++// Author: anuraag@google.com (Anuraag Agrawal) ++// Author: tibell@google.com (Johan Tibell) ++ ++#include ++ ++#include ++#ifndef _SHARED_PTR_H ++#include ++#endif ++ ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++ ++#if PY_MAJOR_VERSION >= 3 ++ #define PyInt_Check PyLong_Check ++ #define PyInt_AsLong PyLong_AsLong ++ #define PyInt_FromLong PyLong_FromLong ++#endif ++ ++namespace google { ++namespace protobuf { ++namespace python { ++ ++extern google::protobuf::DynamicMessageFactory* global_message_factory; ++ ++namespace repeated_composite_container { ++ ++// TODO(tibell): We might also want to check: ++// GOOGLE_CHECK_NOTNULL((self)->owner.get()); ++#define GOOGLE_CHECK_ATTACHED(self) \ ++ do { \ ++ GOOGLE_CHECK_NOTNULL((self)->message); \ ++ GOOGLE_CHECK_NOTNULL((self)->parent_field); \ ++ } while (0); ++ ++#define GOOGLE_CHECK_RELEASED(self) \ ++ do { \ ++ GOOGLE_CHECK((self)->owner.get() == NULL); \ ++ GOOGLE_CHECK((self)->message == NULL); \ ++ GOOGLE_CHECK((self)->parent_field == NULL); \ ++ GOOGLE_CHECK((self)->parent == NULL); \ ++ } while (0); ++ ++// Returns a new reference. ++static PyObject* GetKey(PyObject* x) { ++ // Just the identity function. ++ Py_INCREF(x); ++ return x; ++} ++ ++#define GET_KEY(keyfunc, value) \ ++ ((keyfunc) == NULL ? 
\ ++ GetKey((value)) : \ ++ PyObject_CallFunctionObjArgs((keyfunc), (value), NULL)) ++ ++// Converts a comparison function that returns -1, 0, or 1 into a ++// less-than predicate. ++// ++// Returns -1 on error, 1 if x < y, 0 if x >= y. ++static int islt(PyObject *x, PyObject *y, PyObject *compare) { ++ if (compare == NULL) ++ return PyObject_RichCompareBool(x, y, Py_LT); ++ ++ ScopedPyObjectPtr res(PyObject_CallFunctionObjArgs(compare, x, y, NULL)); ++ if (res == NULL) ++ return -1; ++ if (!PyInt_Check(res)) { ++ PyErr_Format(PyExc_TypeError, ++ "comparison function must return int, not %.200s", ++ Py_TYPE(res)->tp_name); ++ return -1; ++ } ++ return PyInt_AsLong(res) < 0; ++} ++ ++// Copied from uarrsort.c but swaps memcpy swaps with protobuf/python swaps ++// TODO(anuraag): Is there a better way to do this then reinventing the wheel? ++static int InternalQuickSort(RepeatedCompositeContainer* self, ++ Py_ssize_t start, ++ Py_ssize_t limit, ++ PyObject* cmp, ++ PyObject* keyfunc) { ++ if (limit - start <= 1) ++ return 0; // Nothing to sort. 
++ ++ GOOGLE_CHECK_ATTACHED(self); ++ ++ google::protobuf::Message* message = self->message; ++ const google::protobuf::Reflection* reflection = message->GetReflection(); ++ const google::protobuf::FieldDescriptor* descriptor = self->parent_field->descriptor; ++ Py_ssize_t left; ++ Py_ssize_t right; ++ ++ PyObject* children = self->child_messages; ++ ++ do { ++ left = start; ++ right = limit; ++ ScopedPyObjectPtr mid( ++ GET_KEY(keyfunc, PyList_GET_ITEM(children, (start + limit) / 2))); ++ do { ++ ScopedPyObjectPtr key(GET_KEY(keyfunc, PyList_GET_ITEM(children, left))); ++ int is_lt = islt(key, mid, cmp); ++ if (is_lt == -1) ++ return -1; ++ /* array[left]SwapElements(message, descriptor, left, right); ++ PyObject* tmp = PyList_GET_ITEM(children, left); ++ PyList_SET_ITEM(children, left, PyList_GET_ITEM(children, right)); ++ PyList_SET_ITEM(children, right, tmp); ++ } ++ ++left; ++ } ++ } while (left < right); ++ ++ if ((right - start) < (limit - left)) { ++ /* sort [start..right[ */ ++ if (start < (right - 1)) { ++ InternalQuickSort(self, start, right, cmp, keyfunc); ++ } ++ ++ /* sort [left..limit[ */ ++ start = left; ++ } else { ++ /* sort [left..limit[ */ ++ if (left < (limit - 1)) { ++ InternalQuickSort(self, left, limit, cmp, keyfunc); ++ } ++ ++ /* sort [start..right[ */ ++ limit = right; ++ } ++ } while (start < (limit - 1)); ++ ++ return 0; ++} ++ ++#undef GET_KEY ++ ++// --------------------------------------------------------------------- ++// len() ++ ++static Py_ssize_t Length(RepeatedCompositeContainer* self) { ++ google::protobuf::Message* message = self->message; ++ if (message != NULL) { ++ return message->GetReflection()->FieldSize(*message, ++ self->parent_field->descriptor); ++ } else { ++ // The container has been released (i.e. by a call to Clear() or ++ // ClearField() on the parent) and thus there's no message. 
++ return PyList_GET_SIZE(self->child_messages); ++ } ++} ++ ++// Returns 0 if successful; returns -1 and sets an exception if ++// unsuccessful. ++static int UpdateChildMessages(RepeatedCompositeContainer* self) { ++ if (self->message == NULL) ++ return 0; ++ ++ // A MergeFrom on a parent message could have caused extra messages to be ++ // added in the underlying protobuf so add them to our list. They can never ++ // be removed in such a way so there's no need to worry about that. ++ Py_ssize_t message_length = Length(self); ++ Py_ssize_t child_length = PyList_GET_SIZE(self->child_messages); ++ google::protobuf::Message* message = self->message; ++ const google::protobuf::Reflection* reflection = message->GetReflection(); ++ for (Py_ssize_t i = child_length; i < message_length; ++i) { ++ const Message& sub_message = reflection->GetRepeatedMessage( ++ *(self->message), self->parent_field->descriptor, i); ++ ScopedPyObjectPtr py_cmsg(cmessage::NewEmpty(self->subclass_init)); ++ if (py_cmsg == NULL) { ++ return -1; ++ } ++ CMessage* cmsg = reinterpret_cast(py_cmsg.get()); ++ cmsg->owner = self->owner; ++ cmsg->message = const_cast(&sub_message); ++ cmsg->parent = self->parent; ++ if (cmessage::InitAttributes(cmsg, NULL, NULL) < 0) { ++ return -1; ++ } ++ PyList_Append(self->child_messages, py_cmsg); ++ } ++ return 0; ++} ++ ++// --------------------------------------------------------------------- ++// add() ++ ++static PyObject* AddToAttached(RepeatedCompositeContainer* self, ++ PyObject* args, ++ PyObject* kwargs) { ++ GOOGLE_CHECK_ATTACHED(self); ++ ++ if (UpdateChildMessages(self) < 0) { ++ return NULL; ++ } ++ if (cmessage::AssureWritable(self->parent) == -1) ++ return NULL; ++ google::protobuf::Message* message = self->message; ++ google::protobuf::Message* sub_message = ++ message->GetReflection()->AddMessage(message, ++ self->parent_field->descriptor); ++ PyObject* py_cmsg = cmessage::NewEmpty(self->subclass_init); ++ if (py_cmsg == NULL) { ++ return NULL; 
++ } ++ CMessage* cmsg = reinterpret_cast(py_cmsg); ++ ++ cmsg->owner = self->owner; ++ cmsg->message = sub_message; ++ cmsg->parent = self->parent; ++ // cmessage::InitAttributes must be called after cmsg->message has ++ // been set. ++ if (cmessage::InitAttributes(cmsg, NULL, kwargs) < 0) { ++ Py_DECREF(py_cmsg); ++ return NULL; ++ } ++ PyList_Append(self->child_messages, py_cmsg); ++ return py_cmsg; ++} ++ ++static PyObject* AddToReleased(RepeatedCompositeContainer* self, ++ PyObject* args, ++ PyObject* kwargs) { ++ GOOGLE_CHECK_RELEASED(self); ++ ++ // Create the CMessage ++ PyObject* py_cmsg = PyObject_CallObject(self->subclass_init, NULL); ++ if (py_cmsg == NULL) ++ return NULL; ++ CMessage* cmsg = reinterpret_cast(py_cmsg); ++ if (cmessage::InitAttributes(cmsg, NULL, kwargs) < 0) { ++ Py_DECREF(py_cmsg); ++ return NULL; ++ } ++ ++ // The Message got created by the call to subclass_init above and ++ // it set self->owner to the newly allocated message. ++ ++ PyList_Append(self->child_messages, py_cmsg); ++ return py_cmsg; ++} ++ ++PyObject* Add(RepeatedCompositeContainer* self, ++ PyObject* args, ++ PyObject* kwargs) { ++ if (self->message == NULL) ++ return AddToReleased(self, args, kwargs); ++ else ++ return AddToAttached(self, args, kwargs); ++} ++ ++// --------------------------------------------------------------------- ++// extend() ++ ++PyObject* Extend(RepeatedCompositeContainer* self, PyObject* value) { ++ cmessage::AssureWritable(self->parent); ++ if (UpdateChildMessages(self) < 0) { ++ return NULL; ++ } ++ ScopedPyObjectPtr iter(PyObject_GetIter(value)); ++ if (iter == NULL) { ++ PyErr_SetString(PyExc_TypeError, "Value must be iterable"); ++ return NULL; ++ } ++ ScopedPyObjectPtr next; ++ while ((next.reset(PyIter_Next(iter))) != NULL) { ++ if (!PyObject_TypeCheck(next, &CMessage_Type)) { ++ PyErr_SetString(PyExc_TypeError, "Not a cmessage"); ++ return NULL; ++ } ++ ScopedPyObjectPtr new_message(Add(self, NULL, NULL)); ++ if (new_message == NULL) { 
++ return NULL; ++ } ++ CMessage* new_cmessage = reinterpret_cast(new_message.get()); ++ if (cmessage::MergeFrom(new_cmessage, next) == NULL) { ++ return NULL; ++ } ++ } ++ if (PyErr_Occurred()) { ++ return NULL; ++ } ++ Py_RETURN_NONE; ++} ++ ++PyObject* MergeFrom(RepeatedCompositeContainer* self, PyObject* other) { ++ if (UpdateChildMessages(self) < 0) { ++ return NULL; ++ } ++ return Extend(self, other); ++} ++ ++PyObject* Subscript(RepeatedCompositeContainer* self, PyObject* slice) { ++ if (UpdateChildMessages(self) < 0) { ++ return NULL; ++ } ++ Py_ssize_t from; ++ Py_ssize_t to; ++ Py_ssize_t step; ++ Py_ssize_t length = Length(self); ++ Py_ssize_t slicelength; ++ if (PySlice_Check(slice)) { ++#if PY_MAJOR_VERSION >= 3 ++ if (PySlice_GetIndicesEx(slice, ++#else ++ if (PySlice_GetIndicesEx(reinterpret_cast(slice), ++#endif ++ length, &from, &to, &step, &slicelength) == -1) { ++ return NULL; ++ } ++ return PyList_GetSlice(self->child_messages, from, to); ++ } else if (PyInt_Check(slice) || PyLong_Check(slice)) { ++ from = to = PyLong_AsLong(slice); ++ if (from < 0) { ++ from = to = length + from; ++ } ++ PyObject* result = PyList_GetItem(self->child_messages, from); ++ if (result == NULL) { ++ return NULL; ++ } ++ Py_INCREF(result); ++ return result; ++ } ++ PyErr_SetString(PyExc_TypeError, "index must be an integer or slice"); ++ return NULL; ++} ++ ++int AssignSubscript(RepeatedCompositeContainer* self, ++ PyObject* slice, ++ PyObject* value) { ++ if (UpdateChildMessages(self) < 0) { ++ return -1; ++ } ++ if (value != NULL) { ++ PyErr_SetString(PyExc_TypeError, "does not support assignment"); ++ return -1; ++ } ++ ++ // Delete from the underlying Message, if any. 
++ if (self->message != NULL) { ++ if (cmessage::InternalDeleteRepeatedField(self->message, ++ self->parent_field->descriptor, ++ slice, ++ self->child_messages) < 0) { ++ return -1; ++ } ++ } else { ++ Py_ssize_t from; ++ Py_ssize_t to; ++ Py_ssize_t step; ++ Py_ssize_t length = Length(self); ++ Py_ssize_t slicelength; ++ if (PySlice_Check(slice)) { ++#if PY_MAJOR_VERSION >= 3 ++ if (PySlice_GetIndicesEx(slice, ++#else ++ if (PySlice_GetIndicesEx(reinterpret_cast(slice), ++#endif ++ length, &from, &to, &step, &slicelength) == -1) { ++ return -1; ++ } ++ return PySequence_DelSlice(self->child_messages, from, to); ++ } else if (PyInt_Check(slice) || PyLong_Check(slice)) { ++ from = to = PyLong_AsLong(slice); ++ if (from < 0) { ++ from = to = length + from; ++ } ++ return PySequence_DelItem(self->child_messages, from); ++ } ++ } ++ ++ return 0; ++} ++ ++static PyObject* Remove(RepeatedCompositeContainer* self, PyObject* value) { ++ if (UpdateChildMessages(self) < 0) { ++ return NULL; ++ } ++ Py_ssize_t index = PySequence_Index(self->child_messages, value); ++ if (index == -1) { ++ return NULL; ++ } ++ ScopedPyObjectPtr py_index(PyLong_FromLong(index)); ++ if (AssignSubscript(self, py_index, NULL) < 0) { ++ return NULL; ++ } ++ Py_RETURN_NONE; ++} ++ ++static PyObject* RichCompare(RepeatedCompositeContainer* self, ++ PyObject* other, ++ int opid) { ++ if (UpdateChildMessages(self) < 0) { ++ return NULL; ++ } ++ if (!PyObject_TypeCheck(other, &RepeatedCompositeContainer_Type)) { ++ PyErr_SetString(PyExc_TypeError, ++ "Can only compare repeated composite fields " ++ "against other repeated composite fields."); ++ return NULL; ++ } ++ if (opid == Py_EQ || opid == Py_NE) { ++ // TODO(anuraag): Don't make new lists just for this... 
++ ScopedPyObjectPtr full_slice(PySlice_New(NULL, NULL, NULL)); ++ if (full_slice == NULL) { ++ return NULL; ++ } ++ ScopedPyObjectPtr list(Subscript(self, full_slice)); ++ if (list == NULL) { ++ return NULL; ++ } ++ ScopedPyObjectPtr other_list( ++ Subscript( ++ reinterpret_cast(other), full_slice)); ++ if (other_list == NULL) { ++ return NULL; ++ } ++ return PyObject_RichCompare(list, other_list, opid); ++ } else { ++ Py_INCREF(Py_NotImplemented); ++ return Py_NotImplemented; ++ } ++} ++ ++// --------------------------------------------------------------------- ++// sort() ++ ++static PyObject* SortAttached(RepeatedCompositeContainer* self, ++ PyObject* args, ++ PyObject* kwds) { ++ // Sort the underlying Message array. ++ PyObject *compare = NULL; ++ int reverse = 0; ++ PyObject *keyfunc = NULL; ++ static char *kwlist[] = {"cmp", "key", "reverse", 0}; ++ ++ if (args != NULL) { ++ if (!PyArg_ParseTupleAndKeywords(args, kwds, "|OOi:sort", ++ kwlist, &compare, &keyfunc, &reverse)) ++ return NULL; ++ } ++ if (compare == Py_None) ++ compare = NULL; ++ if (keyfunc == Py_None) ++ keyfunc = NULL; ++ ++ const Py_ssize_t length = Length(self); ++ if (InternalQuickSort(self, 0, length, compare, keyfunc) < 0) ++ return NULL; ++ ++ // Finally reverse the result if requested. ++ if (reverse) { ++ google::protobuf::Message* message = self->message; ++ const google::protobuf::Reflection* reflection = message->GetReflection(); ++ const google::protobuf::FieldDescriptor* descriptor = self->parent_field->descriptor; ++ ++ // Reverse the Message array. ++ for (int i = 0; i < length / 2; ++i) ++ reflection->SwapElements(message, descriptor, i, length - i - 1); ++ ++ // Reverse the Python list. 
++ ScopedPyObjectPtr res(PyObject_CallMethod(self->child_messages, ++ "reverse", NULL)); ++ if (res == NULL) ++ return NULL; ++ } ++ ++ Py_RETURN_NONE; ++} ++ ++static PyObject* SortReleased(RepeatedCompositeContainer* self, ++ PyObject* args, ++ PyObject* kwds) { ++ ScopedPyObjectPtr m(PyObject_GetAttrString(self->child_messages, "sort")); ++ if (m == NULL) ++ return NULL; ++ if (PyObject_Call(m, args, kwds) == NULL) ++ return NULL; ++ Py_RETURN_NONE; ++} ++ ++static PyObject* Sort(RepeatedCompositeContainer* self, ++ PyObject* args, ++ PyObject* kwds) { ++ // Support the old sort_function argument for backwards ++ // compatibility. ++ if (kwds != NULL) { ++ PyObject* sort_func = PyDict_GetItemString(kwds, "sort_function"); ++ if (sort_func != NULL) { ++ // Must set before deleting as sort_func is a borrowed reference ++ // and kwds might be the only thing keeping it alive. ++ PyDict_SetItemString(kwds, "cmp", sort_func); ++ PyDict_DelItemString(kwds, "sort_function"); ++ } ++ } ++ ++ if (UpdateChildMessages(self) < 0) ++ return NULL; ++ if (self->message == NULL) { ++ return SortReleased(self, args, kwds); ++ } else { ++ return SortAttached(self, args, kwds); ++ } ++} ++ ++// --------------------------------------------------------------------- ++ ++static PyObject* Item(RepeatedCompositeContainer* self, Py_ssize_t index) { ++ if (UpdateChildMessages(self) < 0) { ++ return NULL; ++ } ++ Py_ssize_t length = Length(self); ++ if (index < 0) { ++ index = length + index; ++ } ++ PyObject* item = PyList_GetItem(self->child_messages, index); ++ if (item == NULL) { ++ return NULL; ++ } ++ Py_INCREF(item); ++ return item; ++} ++ ++// The caller takes ownership of the returned Message. 
++Message* ReleaseLast(const FieldDescriptor* field, ++ const Descriptor* type, ++ Message* message) { ++ GOOGLE_CHECK_NOTNULL(field); ++ GOOGLE_CHECK_NOTNULL(type); ++ GOOGLE_CHECK_NOTNULL(message); ++ ++ Message* released_message = message->GetReflection()->ReleaseLast( ++ message, field); ++ // TODO(tibell): Deal with proto1. ++ ++ // ReleaseMessage will return NULL which differs from ++ // child_cmessage->message, if the field does not exist. In this case, ++ // the latter points to the default instance via a const_cast<>, so we ++ // have to reset it to a new mutable object since we are taking ownership. ++ if (released_message == NULL) { ++ const Message* prototype = global_message_factory->GetPrototype(type); ++ GOOGLE_CHECK_NOTNULL(prototype); ++ return prototype->New(); ++ } else { ++ return released_message; ++ } ++} ++ ++// Release field of message and transfer the ownership to cmessage. ++void ReleaseLastTo(const FieldDescriptor* field, ++ Message* message, ++ CMessage* cmessage) { ++ GOOGLE_CHECK_NOTNULL(field); ++ GOOGLE_CHECK_NOTNULL(message); ++ GOOGLE_CHECK_NOTNULL(cmessage); ++ ++ shared_ptr released_message( ++ ReleaseLast(field, cmessage->message->GetDescriptor(), message)); ++ cmessage->parent = NULL; ++ cmessage->parent_field = NULL; ++ cmessage->message = released_message.get(); ++ cmessage->read_only = false; ++ cmessage::SetOwner(cmessage, released_message); ++} ++ ++// Called to release a container using ++// ClearField('container_field_name') on the parent. ++int Release(RepeatedCompositeContainer* self) { ++ if (UpdateChildMessages(self) < 0) { ++ PyErr_WriteUnraisable(PyBytes_FromString("Failed to update released " ++ "messages")); ++ return -1; ++ } ++ ++ Message* message = self->message; ++ const FieldDescriptor* field = self->parent_field->descriptor; ++ ++ // The reflection API only lets us release the last message in a ++ // repeated field. Therefore we iterate through the children ++ // starting with the last one. 
++ const Py_ssize_t size = PyList_GET_SIZE(self->child_messages); ++ GOOGLE_DCHECK_EQ(size, message->GetReflection()->FieldSize(*message, field)); ++ for (Py_ssize_t i = size - 1; i >= 0; --i) { ++ CMessage* child_cmessage = reinterpret_cast( ++ PyList_GET_ITEM(self->child_messages, i)); ++ ReleaseLastTo(field, message, child_cmessage); ++ } ++ ++ // Detach from containing message. ++ self->parent = NULL; ++ self->parent_field = NULL; ++ self->message = NULL; ++ self->owner.reset(); ++ ++ return 0; ++} ++ ++int SetOwner(RepeatedCompositeContainer* self, ++ const shared_ptr& new_owner) { ++ GOOGLE_CHECK_ATTACHED(self); ++ ++ self->owner = new_owner; ++ const Py_ssize_t n = PyList_GET_SIZE(self->child_messages); ++ for (Py_ssize_t i = 0; i < n; ++i) { ++ PyObject* msg = PyList_GET_ITEM(self->child_messages, i); ++ if (cmessage::SetOwner(reinterpret_cast(msg), new_owner) == -1) { ++ return -1; ++ } ++ } ++ return 0; ++} ++ ++static int Init(RepeatedCompositeContainer* self, ++ PyObject* args, ++ PyObject* kwargs) { ++ self->message = NULL; ++ self->parent = NULL; ++ self->parent_field = NULL; ++ self->subclass_init = NULL; ++ self->child_messages = PyList_New(0); ++ return 0; ++} ++ ++static void Dealloc(RepeatedCompositeContainer* self) { ++ Py_CLEAR(self->child_messages); ++ // TODO(tibell): Do we need to call delete on these objects to make ++ // sure their destructors are called? ++ self->owner.reset(); ++ Py_TYPE(self)->tp_free(reinterpret_cast(self)); ++} ++ ++static PySequenceMethods SqMethods = { ++ (lenfunc)Length, /* sq_length */ ++ 0, /* sq_concat */ ++ 0, /* sq_repeat */ ++ (ssizeargfunc)Item /* sq_item */ ++}; ++ ++static PyMappingMethods MpMethods = { ++ (lenfunc)Length, /* mp_length */ ++ (binaryfunc)Subscript, /* mp_subscript */ ++ (objobjargproc)AssignSubscript,/* mp_ass_subscript */ ++}; ++ ++static PyMethodDef Methods[] = { ++ { "add", (PyCFunction) Add, METH_VARARGS | METH_KEYWORDS, ++ "Adds an object to the repeated container." 
}, ++ { "extend", (PyCFunction) Extend, METH_O, ++ "Adds objects to the repeated container." }, ++ { "remove", (PyCFunction) Remove, METH_O, ++ "Removes an object from the repeated container." }, ++ { "sort", (PyCFunction) Sort, METH_VARARGS | METH_KEYWORDS, ++ "Sorts the repeated container." }, ++ { "MergeFrom", (PyCFunction) MergeFrom, METH_O, ++ "Adds objects to the repeated container." }, ++ { NULL, NULL } ++}; ++ ++} // namespace repeated_composite_container ++ ++PyTypeObject RepeatedCompositeContainer_Type = { ++ PyVarObject_HEAD_INIT(&PyType_Type, 0) ++ "google.protobuf.internal." ++ "cpp._message.RepeatedCompositeContainer", // tp_name ++ sizeof(RepeatedCompositeContainer), // tp_basicsize ++ 0, // tp_itemsize ++ (destructor)repeated_composite_container::Dealloc, // tp_dealloc ++ 0, // tp_print ++ 0, // tp_getattr ++ 0, // tp_setattr ++ 0, // tp_compare ++ 0, // tp_repr ++ 0, // tp_as_number ++ &repeated_composite_container::SqMethods, // tp_as_sequence ++ &repeated_composite_container::MpMethods, // tp_as_mapping ++ 0, // tp_hash ++ 0, // tp_call ++ 0, // tp_str ++ 0, // tp_getattro ++ 0, // tp_setattro ++ 0, // tp_as_buffer ++ Py_TPFLAGS_DEFAULT, // tp_flags ++ "A Repeated scalar container", // tp_doc ++ 0, // tp_traverse ++ 0, // tp_clear ++ (richcmpfunc)repeated_composite_container::RichCompare, // tp_richcompare ++ 0, // tp_weaklistoffset ++ 0, // tp_iter ++ 0, // tp_iternext ++ repeated_composite_container::Methods, // tp_methods ++ 0, // tp_members ++ 0, // tp_getset ++ 0, // tp_base ++ 0, // tp_dict ++ 0, // tp_descr_get ++ 0, // tp_descr_set ++ 0, // tp_dictoffset ++ (initproc)repeated_composite_container::Init, // tp_init ++}; ++ ++} // namespace python ++} // namespace protobuf ++} // namespace google +--- /dev/null ++++ protobuf-2.6.1/python3/google/protobuf/pyext/repeated_composite_container.h +@@ -0,0 +1,172 @@ ++// Protocol Buffers - Google's data interchange format ++// Copyright 2008 Google Inc. All rights reserved. 
++// https://developers.google.com/protocol-buffers/ ++// ++// Redistribution and use in source and binary forms, with or without ++// modification, are permitted provided that the following conditions are ++// met: ++// ++// * Redistributions of source code must retain the above copyright ++// notice, this list of conditions and the following disclaimer. ++// * Redistributions in binary form must reproduce the above ++// copyright notice, this list of conditions and the following disclaimer ++// in the documentation and/or other materials provided with the ++// distribution. ++// * Neither the name of Google Inc. nor the names of its ++// contributors may be used to endorse or promote products derived from ++// this software without specific prior written permission. ++// ++// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS ++// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT ++// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR ++// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT ++// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, ++// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT ++// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, ++// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY ++// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT ++// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE ++// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
++ ++// Author: anuraag@google.com (Anuraag Agrawal) ++// Author: tibell@google.com (Johan Tibell) ++ ++#ifndef GOOGLE_PROTOBUF_PYTHON_CPP_REPEATED_COMPOSITE_CONTAINER_H__ ++#define GOOGLE_PROTOBUF_PYTHON_CPP_REPEATED_COMPOSITE_CONTAINER_H__ ++ ++#include ++ ++#include ++#ifndef _SHARED_PTR_H ++#include ++#endif ++#include ++#include ++ ++ ++namespace google { ++namespace protobuf { ++ ++class FieldDescriptor; ++class Message; ++ ++using internal::shared_ptr; ++ ++namespace python { ++ ++struct CMessage; ++struct CFieldDescriptor; ++ ++// A RepeatedCompositeContainer can be in one of two states: attached ++// or released. ++// ++// When in the attached state all modifications to the container are ++// done both on the 'message' and on the 'child_messages' ++// list. In this state all Messages refered to by the children in ++// 'child_messages' are owner by the 'owner'. ++// ++// When in the released state 'message', 'owner', 'parent', and ++// 'parent_field' are NULL. ++typedef struct RepeatedCompositeContainer { ++ PyObject_HEAD; ++ ++ // This is the top-level C++ Message object that owns the whole ++ // proto tree. Every Python RepeatedCompositeContainer holds a ++ // reference to it in order to keep it alive as long as there's a ++ // Python object that references any part of the tree. ++ shared_ptr owner; ++ ++ // Weak reference to parent object. May be NULL. Used to make sure ++ // the parent is writable before modifying the ++ // RepeatedCompositeContainer. ++ CMessage* parent; ++ ++ // A descriptor used to modify the underlying 'message'. ++ CFieldDescriptor* parent_field; ++ ++ // Pointer to the C++ Message that contains this container. The ++ // RepeatedCompositeContainer does not own this pointer. ++ // ++ // If NULL, this message has been released from its parent (by ++ // calling Clear() or ClearField() on the parent. ++ Message* message; ++ ++ // A callable that is used to create new child messages. 
++ PyObject* subclass_init; ++ ++ // A list of child messages. ++ PyObject* child_messages; ++} RepeatedCompositeContainer; ++ ++extern PyTypeObject RepeatedCompositeContainer_Type; ++ ++namespace repeated_composite_container { ++ ++// Returns the number of items in this repeated composite container. ++static Py_ssize_t Length(RepeatedCompositeContainer* self); ++ ++// Appends a new CMessage to the container and returns it. The ++// CMessage is initialized using the content of kwargs. ++// ++// Returns a new reference if successful; returns NULL and sets an ++// exception if unsuccessful. ++PyObject* Add(RepeatedCompositeContainer* self, ++ PyObject* args, ++ PyObject* kwargs); ++ ++// Appends all the CMessages in the input iterator to the container. ++// ++// Returns None if successful; returns NULL and sets an exception if ++// unsuccessful. ++PyObject* Extend(RepeatedCompositeContainer* self, PyObject* value); ++ ++// Appends a new message to the container for each message in the ++// input iterator, merging each data element in. Equivalent to extend. ++// ++// Returns None if successful; returns NULL and sets an exception if ++// unsuccessful. ++PyObject* MergeFrom(RepeatedCompositeContainer* self, PyObject* other); ++ ++// Accesses messages in the container. ++// ++// Returns a new reference to the message for an integer parameter. ++// Returns a new reference to a list of messages for a slice. ++PyObject* Subscript(RepeatedCompositeContainer* self, PyObject* slice); ++ ++// Deletes items from the container (cannot be used for assignment). ++// ++// Returns 0 on success, -1 on failure. ++int AssignSubscript(RepeatedCompositeContainer* self, ++ PyObject* slice, ++ PyObject* value); ++ ++// Releases the messages in the container to the given message. ++// ++// Returns 0 on success, -1 on failure. ++int ReleaseToMessage(RepeatedCompositeContainer* self, ++ google::protobuf::Message* new_message); ++ ++// Releases the messages in the container to a new message. 
++// ++// Returns 0 on success, -1 on failure. ++int Release(RepeatedCompositeContainer* self); ++ ++// Returns 0 on success, -1 on failure. ++int SetOwner(RepeatedCompositeContainer* self, ++ const shared_ptr& new_owner); ++ ++// Removes the last element of the repeated message field 'field' on ++// the Message 'message', and transfers the ownership of the released ++// Message to 'cmessage'. ++// ++// Corresponds to reflection api method ReleaseMessage. ++void ReleaseLastTo(const FieldDescriptor* field, ++ Message* message, ++ CMessage* cmessage); ++ ++} // namespace repeated_composite_container ++} // namespace python ++} // namespace protobuf ++ ++} // namespace google ++#endif // GOOGLE_PROTOBUF_PYTHON_CPP_REPEATED_COMPOSITE_CONTAINER_H__ +--- /dev/null ++++ protobuf-2.6.1/python3/google/protobuf/pyext/repeated_scalar_container.cc +@@ -0,0 +1,825 @@ ++// Protocol Buffers - Google's data interchange format ++// Copyright 2008 Google Inc. All rights reserved. ++// https://developers.google.com/protocol-buffers/ ++// ++// Redistribution and use in source and binary forms, with or without ++// modification, are permitted provided that the following conditions are ++// met: ++// ++// * Redistributions of source code must retain the above copyright ++// notice, this list of conditions and the following disclaimer. ++// * Redistributions in binary form must reproduce the above ++// copyright notice, this list of conditions and the following disclaimer ++// in the documentation and/or other materials provided with the ++// distribution. ++// * Neither the name of Google Inc. nor the names of its ++// contributors may be used to endorse or promote products derived from ++// this software without specific prior written permission. 
++// ++// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS ++// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT ++// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR ++// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT ++// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, ++// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT ++// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, ++// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY ++// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT ++// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE ++// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. ++ ++// Author: anuraag@google.com (Anuraag Agrawal) ++// Author: tibell@google.com (Johan Tibell) ++ ++#include ++ ++#include ++#ifndef _SHARED_PTR_H ++#include ++#endif ++ ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++ ++#if PY_MAJOR_VERSION >= 3 ++ #define PyInt_FromLong PyLong_FromLong ++ #if PY_VERSION_HEX < 0x03030000 ++ #error "Python 3.0 - 3.2 are not supported." ++ #else ++ #define PyString_AsString(ob) \ ++ (PyUnicode_Check(ob)? 
PyUnicode_AsUTF8(ob): PyBytes_AS_STRING(ob)) ++ #endif ++#endif ++ ++namespace google { ++namespace protobuf { ++namespace python { ++ ++extern google::protobuf::DynamicMessageFactory* global_message_factory; ++ ++namespace repeated_scalar_container { ++ ++static int InternalAssignRepeatedField( ++ RepeatedScalarContainer* self, PyObject* list) { ++ self->message->GetReflection()->ClearField(self->message, ++ self->parent_field->descriptor); ++ for (Py_ssize_t i = 0; i < PyList_GET_SIZE(list); ++i) { ++ PyObject* value = PyList_GET_ITEM(list, i); ++ if (Append(self, value) == NULL) { ++ return -1; ++ } ++ } ++ return 0; ++} ++ ++static Py_ssize_t Len(RepeatedScalarContainer* self) { ++ google::protobuf::Message* message = self->message; ++ return message->GetReflection()->FieldSize(*message, ++ self->parent_field->descriptor); ++} ++ ++static int AssignItem(RepeatedScalarContainer* self, ++ Py_ssize_t index, ++ PyObject* arg) { ++ cmessage::AssureWritable(self->parent); ++ google::protobuf::Message* message = self->message; ++ const google::protobuf::FieldDescriptor* field_descriptor = ++ self->parent_field->descriptor; ++ if (!FIELD_BELONGS_TO_MESSAGE(field_descriptor, message)) { ++ PyErr_SetString( ++ PyExc_KeyError, "Field does not belong to message!"); ++ return -1; ++ } ++ ++ const google::protobuf::Reflection* reflection = message->GetReflection(); ++ int field_size = reflection->FieldSize(*message, field_descriptor); ++ if (index < 0) { ++ index = field_size + index; ++ } ++ if (index < 0 || index >= field_size) { ++ PyErr_Format(PyExc_IndexError, ++ "list assignment index (%d) out of range", ++ static_cast(index)); ++ return -1; ++ } ++ ++ if (arg == NULL) { ++ ScopedPyObjectPtr py_index(PyLong_FromLong(index)); ++ return cmessage::InternalDeleteRepeatedField(message, field_descriptor, ++ py_index, NULL); ++ } ++ ++ if (PySequence_Check(arg) && !(PyBytes_Check(arg) || PyUnicode_Check(arg))) { ++ PyErr_SetString(PyExc_TypeError, "Value must be scalar"); ++ 
return -1; ++ } ++ ++ switch (field_descriptor->cpp_type()) { ++ case google::protobuf::FieldDescriptor::CPPTYPE_INT32: { ++ GOOGLE_CHECK_GET_INT32(arg, value, -1); ++ reflection->SetRepeatedInt32(message, field_descriptor, index, value); ++ break; ++ } ++ case google::protobuf::FieldDescriptor::CPPTYPE_INT64: { ++ GOOGLE_CHECK_GET_INT64(arg, value, -1); ++ reflection->SetRepeatedInt64(message, field_descriptor, index, value); ++ break; ++ } ++ case google::protobuf::FieldDescriptor::CPPTYPE_UINT32: { ++ GOOGLE_CHECK_GET_UINT32(arg, value, -1); ++ reflection->SetRepeatedUInt32(message, field_descriptor, index, value); ++ break; ++ } ++ case google::protobuf::FieldDescriptor::CPPTYPE_UINT64: { ++ GOOGLE_CHECK_GET_UINT64(arg, value, -1); ++ reflection->SetRepeatedUInt64(message, field_descriptor, index, value); ++ break; ++ } ++ case google::protobuf::FieldDescriptor::CPPTYPE_FLOAT: { ++ GOOGLE_CHECK_GET_FLOAT(arg, value, -1); ++ reflection->SetRepeatedFloat(message, field_descriptor, index, value); ++ break; ++ } ++ case google::protobuf::FieldDescriptor::CPPTYPE_DOUBLE: { ++ GOOGLE_CHECK_GET_DOUBLE(arg, value, -1); ++ reflection->SetRepeatedDouble(message, field_descriptor, index, value); ++ break; ++ } ++ case google::protobuf::FieldDescriptor::CPPTYPE_BOOL: { ++ GOOGLE_CHECK_GET_BOOL(arg, value, -1); ++ reflection->SetRepeatedBool(message, field_descriptor, index, value); ++ break; ++ } ++ case google::protobuf::FieldDescriptor::CPPTYPE_STRING: { ++ if (!CheckAndSetString( ++ arg, message, field_descriptor, reflection, false, index)) { ++ return -1; ++ } ++ break; ++ } ++ case google::protobuf::FieldDescriptor::CPPTYPE_ENUM: { ++ GOOGLE_CHECK_GET_INT32(arg, value, -1); ++ const google::protobuf::EnumDescriptor* enum_descriptor = ++ field_descriptor->enum_type(); ++ const google::protobuf::EnumValueDescriptor* enum_value = ++ enum_descriptor->FindValueByNumber(value); ++ if (enum_value != NULL) { ++ reflection->SetRepeatedEnum(message, field_descriptor, index, ++ 
enum_value); ++ } else { ++ ScopedPyObjectPtr s(PyObject_Str(arg)); ++ if (s != NULL) { ++ PyErr_Format(PyExc_ValueError, "Unknown enum value: %s", ++ PyString_AsString(s.get())); ++ } ++ return -1; ++ } ++ break; ++ } ++ default: ++ PyErr_Format( ++ PyExc_SystemError, "Adding value to a field of unknown type %d", ++ field_descriptor->cpp_type()); ++ return -1; ++ } ++ return 0; ++} ++ ++static PyObject* Item(RepeatedScalarContainer* self, Py_ssize_t index) { ++ google::protobuf::Message* message = self->message; ++ const google::protobuf::FieldDescriptor* field_descriptor = ++ self->parent_field->descriptor; ++ const google::protobuf::Reflection* reflection = message->GetReflection(); ++ ++ int field_size = reflection->FieldSize(*message, field_descriptor); ++ if (index < 0) { ++ index = field_size + index; ++ } ++ if (index < 0 || index >= field_size) { ++ PyErr_Format(PyExc_IndexError, ++ "list assignment index (%d) out of range", ++ static_cast(index)); ++ return NULL; ++ } ++ ++ PyObject* result = NULL; ++ switch (field_descriptor->cpp_type()) { ++ case google::protobuf::FieldDescriptor::CPPTYPE_INT32: { ++ int32 value = reflection->GetRepeatedInt32( ++ *message, field_descriptor, index); ++ result = PyInt_FromLong(value); ++ break; ++ } ++ case google::protobuf::FieldDescriptor::CPPTYPE_INT64: { ++ int64 value = reflection->GetRepeatedInt64( ++ *message, field_descriptor, index); ++ result = PyLong_FromLongLong(value); ++ break; ++ } ++ case google::protobuf::FieldDescriptor::CPPTYPE_UINT32: { ++ uint32 value = reflection->GetRepeatedUInt32( ++ *message, field_descriptor, index); ++ result = PyLong_FromLongLong(value); ++ break; ++ } ++ case google::protobuf::FieldDescriptor::CPPTYPE_UINT64: { ++ uint64 value = reflection->GetRepeatedUInt64( ++ *message, field_descriptor, index); ++ result = PyLong_FromUnsignedLongLong(value); ++ break; ++ } ++ case google::protobuf::FieldDescriptor::CPPTYPE_FLOAT: { ++ float value = reflection->GetRepeatedFloat( ++ *message, 
field_descriptor, index); ++ result = PyFloat_FromDouble(value); ++ break; ++ } ++ case google::protobuf::FieldDescriptor::CPPTYPE_DOUBLE: { ++ double value = reflection->GetRepeatedDouble( ++ *message, field_descriptor, index); ++ result = PyFloat_FromDouble(value); ++ break; ++ } ++ case google::protobuf::FieldDescriptor::CPPTYPE_BOOL: { ++ bool value = reflection->GetRepeatedBool( ++ *message, field_descriptor, index); ++ result = PyBool_FromLong(value ? 1 : 0); ++ break; ++ } ++ case google::protobuf::FieldDescriptor::CPPTYPE_ENUM: { ++ const google::protobuf::EnumValueDescriptor* enum_value = ++ message->GetReflection()->GetRepeatedEnum( ++ *message, field_descriptor, index); ++ result = PyInt_FromLong(enum_value->number()); ++ break; ++ } ++ case google::protobuf::FieldDescriptor::CPPTYPE_STRING: { ++ string value = reflection->GetRepeatedString( ++ *message, field_descriptor, index); ++ result = ToStringObject(field_descriptor, value); ++ break; ++ } ++ case google::protobuf::FieldDescriptor::CPPTYPE_MESSAGE: { ++ PyObject* py_cmsg = PyObject_CallObject(reinterpret_cast( ++ &CMessage_Type), NULL); ++ if (py_cmsg == NULL) { ++ return NULL; ++ } ++ CMessage* cmsg = reinterpret_cast(py_cmsg); ++ const google::protobuf::Message& msg = reflection->GetRepeatedMessage( ++ *message, field_descriptor, index); ++ cmsg->owner = self->owner; ++ cmsg->parent = self->parent; ++ cmsg->message = const_cast(&msg); ++ cmsg->read_only = false; ++ result = reinterpret_cast(py_cmsg); ++ break; ++ } ++ default: ++ PyErr_Format( ++ PyExc_SystemError, ++ "Getting value from a repeated field of unknown type %d", ++ field_descriptor->cpp_type()); ++ } ++ ++ return result; ++} ++ ++static PyObject* Subscript(RepeatedScalarContainer* self, PyObject* slice) { ++ Py_ssize_t from; ++ Py_ssize_t to; ++ Py_ssize_t step; ++ Py_ssize_t length; ++ Py_ssize_t slicelength; ++ bool return_list = false; ++#if PY_MAJOR_VERSION < 3 ++ if (PyInt_Check(slice)) { ++ from = to = PyInt_AsLong(slice); ++ 
} else // NOLINT ++#endif ++ if (PyLong_Check(slice)) { ++ from = to = PyLong_AsLong(slice); ++ } else if (PySlice_Check(slice)) { ++ length = Len(self); ++#if PY_MAJOR_VERSION >= 3 ++ if (PySlice_GetIndicesEx(slice, ++#else ++ if (PySlice_GetIndicesEx(reinterpret_cast(slice), ++#endif ++ length, &from, &to, &step, &slicelength) == -1) { ++ return NULL; ++ } ++ return_list = true; ++ } else { ++ PyErr_SetString(PyExc_TypeError, "list indices must be integers"); ++ return NULL; ++ } ++ ++ if (!return_list) { ++ return Item(self, from); ++ } ++ ++ PyObject* list = PyList_New(0); ++ if (list == NULL) { ++ return NULL; ++ } ++ if (from <= to) { ++ if (step < 0) { ++ return list; ++ } ++ for (Py_ssize_t index = from; index < to; index += step) { ++ if (index < 0 || index >= length) { ++ break; ++ } ++ ScopedPyObjectPtr s(Item(self, index)); ++ PyList_Append(list, s); ++ } ++ } else { ++ if (step > 0) { ++ return list; ++ } ++ for (Py_ssize_t index = from; index > to; index += step) { ++ if (index < 0 || index >= length) { ++ break; ++ } ++ ScopedPyObjectPtr s(Item(self, index)); ++ PyList_Append(list, s); ++ } ++ } ++ return list; ++} ++ ++PyObject* Append(RepeatedScalarContainer* self, PyObject* item) { ++ cmessage::AssureWritable(self->parent); ++ google::protobuf::Message* message = self->message; ++ const google::protobuf::FieldDescriptor* field_descriptor = ++ self->parent_field->descriptor; ++ ++ if (!FIELD_BELONGS_TO_MESSAGE(field_descriptor, message)) { ++ PyErr_SetString( ++ PyExc_KeyError, "Field does not belong to message!"); ++ return NULL; ++ } ++ ++ const google::protobuf::Reflection* reflection = message->GetReflection(); ++ switch (field_descriptor->cpp_type()) { ++ case google::protobuf::FieldDescriptor::CPPTYPE_INT32: { ++ GOOGLE_CHECK_GET_INT32(item, value, NULL); ++ reflection->AddInt32(message, field_descriptor, value); ++ break; ++ } ++ case google::protobuf::FieldDescriptor::CPPTYPE_INT64: { ++ GOOGLE_CHECK_GET_INT64(item, value, NULL); ++ 
reflection->AddInt64(message, field_descriptor, value); ++ break; ++ } ++ case google::protobuf::FieldDescriptor::CPPTYPE_UINT32: { ++ GOOGLE_CHECK_GET_UINT32(item, value, NULL); ++ reflection->AddUInt32(message, field_descriptor, value); ++ break; ++ } ++ case google::protobuf::FieldDescriptor::CPPTYPE_UINT64: { ++ GOOGLE_CHECK_GET_UINT64(item, value, NULL); ++ reflection->AddUInt64(message, field_descriptor, value); ++ break; ++ } ++ case google::protobuf::FieldDescriptor::CPPTYPE_FLOAT: { ++ GOOGLE_CHECK_GET_FLOAT(item, value, NULL); ++ reflection->AddFloat(message, field_descriptor, value); ++ break; ++ } ++ case google::protobuf::FieldDescriptor::CPPTYPE_DOUBLE: { ++ GOOGLE_CHECK_GET_DOUBLE(item, value, NULL); ++ reflection->AddDouble(message, field_descriptor, value); ++ break; ++ } ++ case google::protobuf::FieldDescriptor::CPPTYPE_BOOL: { ++ GOOGLE_CHECK_GET_BOOL(item, value, NULL); ++ reflection->AddBool(message, field_descriptor, value); ++ break; ++ } ++ case google::protobuf::FieldDescriptor::CPPTYPE_STRING: { ++ if (!CheckAndSetString( ++ item, message, field_descriptor, reflection, true, -1)) { ++ return NULL; ++ } ++ break; ++ } ++ case google::protobuf::FieldDescriptor::CPPTYPE_ENUM: { ++ GOOGLE_CHECK_GET_INT32(item, value, NULL); ++ const google::protobuf::EnumDescriptor* enum_descriptor = ++ field_descriptor->enum_type(); ++ const google::protobuf::EnumValueDescriptor* enum_value = ++ enum_descriptor->FindValueByNumber(value); ++ if (enum_value != NULL) { ++ reflection->AddEnum(message, field_descriptor, enum_value); ++ } else { ++ ScopedPyObjectPtr s(PyObject_Str(item)); ++ if (s != NULL) { ++ PyErr_Format(PyExc_ValueError, "Unknown enum value: %s", ++ PyString_AsString(s.get())); ++ } ++ return NULL; ++ } ++ break; ++ } ++ default: ++ PyErr_Format( ++ PyExc_SystemError, "Adding value to a field of unknown type %d", ++ field_descriptor->cpp_type()); ++ return NULL; ++ } ++ ++ Py_RETURN_NONE; ++} ++ ++static int 
AssSubscript(RepeatedScalarContainer* self, ++ PyObject* slice, ++ PyObject* value) { ++ Py_ssize_t from; ++ Py_ssize_t to; ++ Py_ssize_t step; ++ Py_ssize_t length; ++ Py_ssize_t slicelength; ++ bool create_list = false; ++ ++ cmessage::AssureWritable(self->parent); ++ google::protobuf::Message* message = self->message; ++ const google::protobuf::FieldDescriptor* field_descriptor = ++ self->parent_field->descriptor; ++ ++#if PY_MAJOR_VERSION < 3 ++ if (PyInt_Check(slice)) { ++ from = to = PyInt_AsLong(slice); ++ } else ++#endif ++ if (PyLong_Check(slice)) { ++ from = to = PyLong_AsLong(slice); ++ } else if (PySlice_Check(slice)) { ++ const google::protobuf::Reflection* reflection = message->GetReflection(); ++ length = reflection->FieldSize(*message, field_descriptor); ++#if PY_MAJOR_VERSION >= 3 ++ if (PySlice_GetIndicesEx(slice, ++#else ++ if (PySlice_GetIndicesEx(reinterpret_cast(slice), ++#endif ++ length, &from, &to, &step, &slicelength) == -1) { ++ return -1; ++ } ++ create_list = true; ++ } else { ++ PyErr_SetString(PyExc_TypeError, "list indices must be integers"); ++ return -1; ++ } ++ ++ if (value == NULL) { ++ return cmessage::InternalDeleteRepeatedField( ++ message, field_descriptor, slice, NULL); ++ } ++ ++ if (!create_list) { ++ return AssignItem(self, from, value); ++ } ++ ++ ScopedPyObjectPtr full_slice(PySlice_New(NULL, NULL, NULL)); ++ if (full_slice == NULL) { ++ return -1; ++ } ++ ScopedPyObjectPtr new_list(Subscript(self, full_slice)); ++ if (new_list == NULL) { ++ return -1; ++ } ++ if (PySequence_SetSlice(new_list, from, to, value) < 0) { ++ return -1; ++ } ++ ++ return InternalAssignRepeatedField(self, new_list); ++} ++ ++PyObject* Extend(RepeatedScalarContainer* self, PyObject* value) { ++ cmessage::AssureWritable(self->parent); ++ if (PyObject_Not(value)) { ++ Py_RETURN_NONE; ++ } ++ ScopedPyObjectPtr iter(PyObject_GetIter(value)); ++ if (iter == NULL) { ++ PyErr_SetString(PyExc_TypeError, "Value must be iterable"); ++ return NULL; ++ } 
++ ScopedPyObjectPtr next; ++ while ((next.reset(PyIter_Next(iter))) != NULL) { ++ if (Append(self, next) == NULL) { ++ return NULL; ++ } ++ } ++ if (PyErr_Occurred()) { ++ return NULL; ++ } ++ Py_RETURN_NONE; ++} ++ ++static PyObject* Insert(RepeatedScalarContainer* self, PyObject* args) { ++ Py_ssize_t index; ++ PyObject* value; ++ if (!PyArg_ParseTuple(args, "lO", &index, &value)) { ++ return NULL; ++ } ++ ScopedPyObjectPtr full_slice(PySlice_New(NULL, NULL, NULL)); ++ ScopedPyObjectPtr new_list(Subscript(self, full_slice)); ++ if (PyList_Insert(new_list, index, value) < 0) { ++ return NULL; ++ } ++ int ret = InternalAssignRepeatedField(self, new_list); ++ if (ret < 0) { ++ return NULL; ++ } ++ Py_RETURN_NONE; ++} ++ ++static PyObject* Remove(RepeatedScalarContainer* self, PyObject* value) { ++ Py_ssize_t match_index = -1; ++ for (Py_ssize_t i = 0; i < Len(self); ++i) { ++ ScopedPyObjectPtr elem(Item(self, i)); ++ if (PyObject_RichCompareBool(elem, value, Py_EQ)) { ++ match_index = i; ++ break; ++ } ++ } ++ if (match_index == -1) { ++ PyErr_SetString(PyExc_ValueError, "remove(x): x not in container"); ++ return NULL; ++ } ++ if (AssignItem(self, match_index, NULL) < 0) { ++ return NULL; ++ } ++ Py_RETURN_NONE; ++} ++ ++static PyObject* RichCompare(RepeatedScalarContainer* self, ++ PyObject* other, ++ int opid) { ++ if (opid != Py_EQ && opid != Py_NE) { ++ Py_INCREF(Py_NotImplemented); ++ return Py_NotImplemented; ++ } ++ ++ // Copy the contents of this repeated scalar container, and other if it is ++ // also a repeated scalar container, into Python lists so we can delegate ++ // to the list's compare method. 
++ ++ ScopedPyObjectPtr full_slice(PySlice_New(NULL, NULL, NULL)); ++ if (full_slice == NULL) { ++ return NULL; ++ } ++ ++ ScopedPyObjectPtr other_list_deleter; ++ if (PyObject_TypeCheck(other, &RepeatedScalarContainer_Type)) { ++ other_list_deleter.reset(Subscript( ++ reinterpret_cast(other), full_slice)); ++ other = other_list_deleter.get(); ++ } ++ ++ ScopedPyObjectPtr list(Subscript(self, full_slice)); ++ if (list == NULL) { ++ return NULL; ++ } ++ return PyObject_RichCompare(list, other, opid); ++} ++ ++PyObject* Reduce(RepeatedScalarContainer* unused_self) { ++ PyErr_Format( ++ PickleError_class, ++ "can't pickle repeated message fields, convert to list first"); ++ return NULL; ++} ++ ++static PyObject* Sort(RepeatedScalarContainer* self, ++ PyObject* args, ++ PyObject* kwds) { ++ // Support the old sort_function argument for backwards ++ // compatibility. ++ if (kwds != NULL) { ++ PyObject* sort_func = PyDict_GetItemString(kwds, "sort_function"); ++ if (sort_func != NULL) { ++ // Must set before deleting as sort_func is a borrowed reference ++ // and kwds might be the only thing keeping it alive. 
++ if (PyDict_SetItemString(kwds, "cmp", sort_func) == -1) ++ return NULL; ++ if (PyDict_DelItemString(kwds, "sort_function") == -1) ++ return NULL; ++ } ++ } ++ ++ ScopedPyObjectPtr full_slice(PySlice_New(NULL, NULL, NULL)); ++ if (full_slice == NULL) { ++ return NULL; ++ } ++ ScopedPyObjectPtr list(Subscript(self, full_slice)); ++ if (list == NULL) { ++ return NULL; ++ } ++ ScopedPyObjectPtr m(PyObject_GetAttrString(list, "sort")); ++ if (m == NULL) { ++ return NULL; ++ } ++ ScopedPyObjectPtr res(PyObject_Call(m, args, kwds)); ++ if (res == NULL) { ++ return NULL; ++ } ++ int ret = InternalAssignRepeatedField(self, list); ++ if (ret < 0) { ++ return NULL; ++ } ++ Py_RETURN_NONE; ++} ++ ++static int Init(RepeatedScalarContainer* self, ++ PyObject* args, ++ PyObject* kwargs) { ++ PyObject* py_parent; ++ PyObject* py_parent_field; ++ if (!PyArg_UnpackTuple(args, "__init__()", 2, 2, &py_parent, ++ &py_parent_field)) { ++ return -1; ++ } ++ ++ if (!PyObject_TypeCheck(py_parent, &CMessage_Type)) { ++ PyErr_Format(PyExc_TypeError, ++ "expect %s, but got %s", ++ CMessage_Type.tp_name, ++ Py_TYPE(py_parent)->tp_name); ++ return -1; ++ } ++ ++ if (!PyObject_TypeCheck(py_parent_field, &CFieldDescriptor_Type)) { ++ PyErr_Format(PyExc_TypeError, ++ "expect %s, but got %s", ++ CFieldDescriptor_Type.tp_name, ++ Py_TYPE(py_parent_field)->tp_name); ++ return -1; ++ } ++ ++ CMessage* cmessage = reinterpret_cast(py_parent); ++ CFieldDescriptor* cdescriptor = reinterpret_cast( ++ py_parent_field); ++ ++ if (!FIELD_BELONGS_TO_MESSAGE(cdescriptor->descriptor, cmessage->message)) { ++ PyErr_SetString( ++ PyExc_KeyError, "Field does not belong to message!"); ++ return -1; ++ } ++ ++ self->message = cmessage->message; ++ self->parent = cmessage; ++ self->parent_field = cdescriptor; ++ self->owner = cmessage->owner; ++ return 0; ++} ++ ++// Initializes the underlying Message object of "to" so it becomes a new parent ++// repeated scalar, and copies all the values from "from" to it. 
A child scalar ++// container can be released by passing it as both from and to (e.g. making it ++// the recipient of the new parent message and copying the values from itself). ++static int InitializeAndCopyToParentContainer( ++ RepeatedScalarContainer* from, ++ RepeatedScalarContainer* to) { ++ ScopedPyObjectPtr full_slice(PySlice_New(NULL, NULL, NULL)); ++ if (full_slice == NULL) { ++ return -1; ++ } ++ ScopedPyObjectPtr values(Subscript(from, full_slice)); ++ if (values == NULL) { ++ return -1; ++ } ++ google::protobuf::Message* new_message = global_message_factory->GetPrototype( ++ from->message->GetDescriptor())->New(); ++ to->parent = NULL; ++ // TODO(anuraag): Document why it's OK to hang on to parent_field, ++ // even though it's a weak reference. It ought to be enough to ++ // hold on to the FieldDescriptor only. ++ to->parent_field = from->parent_field; ++ to->message = new_message; ++ to->owner.reset(new_message); ++ if (InternalAssignRepeatedField(to, values) < 0) { ++ return -1; ++ } ++ return 0; ++} ++ ++int Release(RepeatedScalarContainer* self) { ++ return InitializeAndCopyToParentContainer(self, self); ++} ++ ++PyObject* DeepCopy(RepeatedScalarContainer* self, PyObject* arg) { ++ ScopedPyObjectPtr init_args( ++ PyTuple_Pack(2, self->parent, self->parent_field)); ++ PyObject* clone = PyObject_CallObject( ++ reinterpret_cast(&RepeatedScalarContainer_Type), init_args); ++ if (clone == NULL) { ++ return NULL; ++ } ++ if (!PyObject_TypeCheck(clone, &RepeatedScalarContainer_Type)) { ++ Py_DECREF(clone); ++ return NULL; ++ } ++ if (InitializeAndCopyToParentContainer( ++ self, reinterpret_cast(clone)) < 0) { ++ Py_DECREF(clone); ++ return NULL; ++ } ++ return clone; ++} ++ ++static void Dealloc(RepeatedScalarContainer* self) { ++ self->owner.reset(); ++ Py_TYPE(self)->tp_free(reinterpret_cast(self)); ++} ++ ++void SetOwner(RepeatedScalarContainer* self, ++ const shared_ptr& new_owner) { ++ self->owner = new_owner; ++} ++ ++static PySequenceMethods 
SqMethods = { ++ (lenfunc)Len, /* sq_length */ ++ 0, /* sq_concat */ ++ 0, /* sq_repeat */ ++ (ssizeargfunc)Item, /* sq_item */ ++ 0, /* sq_slice */ ++ (ssizeobjargproc)AssignItem /* sq_ass_item */ ++}; ++ ++static PyMappingMethods MpMethods = { ++ (lenfunc)Len, /* mp_length */ ++ (binaryfunc)Subscript, /* mp_subscript */ ++ (objobjargproc)AssSubscript, /* mp_ass_subscript */ ++}; ++ ++static PyMethodDef Methods[] = { ++ { "__deepcopy__", (PyCFunction)DeepCopy, METH_VARARGS, ++ "Makes a deep copy of the class." }, ++ { "__reduce__", (PyCFunction)Reduce, METH_NOARGS, ++ "Outputs picklable representation of the repeated field." }, ++ { "append", (PyCFunction)Append, METH_O, ++ "Appends an object to the repeated container." }, ++ { "extend", (PyCFunction)Extend, METH_O, ++ "Appends objects to the repeated container." }, ++ { "insert", (PyCFunction)Insert, METH_VARARGS, ++ "Appends objects to the repeated container." }, ++ { "remove", (PyCFunction)Remove, METH_O, ++ "Removes an object from the repeated container." }, ++ { "sort", (PyCFunction)Sort, METH_VARARGS | METH_KEYWORDS, ++ "Sorts the repeated container."}, ++ { NULL, NULL } ++}; ++ ++} // namespace repeated_scalar_container ++ ++PyTypeObject RepeatedScalarContainer_Type = { ++ PyVarObject_HEAD_INIT(&PyType_Type, 0) ++ "google.protobuf.internal." 
++ "cpp._message.RepeatedScalarContainer", // tp_name ++ sizeof(RepeatedScalarContainer), // tp_basicsize ++ 0, // tp_itemsize ++ (destructor)repeated_scalar_container::Dealloc, // tp_dealloc ++ 0, // tp_print ++ 0, // tp_getattr ++ 0, // tp_setattr ++ 0, // tp_compare ++ 0, // tp_repr ++ 0, // tp_as_number ++ &repeated_scalar_container::SqMethods, // tp_as_sequence ++ &repeated_scalar_container::MpMethods, // tp_as_mapping ++ 0, // tp_hash ++ 0, // tp_call ++ 0, // tp_str ++ 0, // tp_getattro ++ 0, // tp_setattro ++ 0, // tp_as_buffer ++ Py_TPFLAGS_DEFAULT, // tp_flags ++ "A Repeated scalar container", // tp_doc ++ 0, // tp_traverse ++ 0, // tp_clear ++ (richcmpfunc)repeated_scalar_container::RichCompare, // tp_richcompare ++ 0, // tp_weaklistoffset ++ 0, // tp_iter ++ 0, // tp_iternext ++ repeated_scalar_container::Methods, // tp_methods ++ 0, // tp_members ++ 0, // tp_getset ++ 0, // tp_base ++ 0, // tp_dict ++ 0, // tp_descr_get ++ 0, // tp_descr_set ++ 0, // tp_dictoffset ++ (initproc)repeated_scalar_container::Init, // tp_init ++}; ++ ++} // namespace python ++} // namespace protobuf ++} // namespace google +--- /dev/null ++++ protobuf-2.6.1/python3/google/protobuf/pyext/repeated_scalar_container.h +@@ -0,0 +1,112 @@ ++// Protocol Buffers - Google's data interchange format ++// Copyright 2008 Google Inc. All rights reserved. ++// https://developers.google.com/protocol-buffers/ ++// ++// Redistribution and use in source and binary forms, with or without ++// modification, are permitted provided that the following conditions are ++// met: ++// ++// * Redistributions of source code must retain the above copyright ++// notice, this list of conditions and the following disclaimer. ++// * Redistributions in binary form must reproduce the above ++// copyright notice, this list of conditions and the following disclaimer ++// in the documentation and/or other materials provided with the ++// distribution. ++// * Neither the name of Google Inc. 
nor the names of its ++// contributors may be used to endorse or promote products derived from ++// this software without specific prior written permission. ++// ++// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS ++// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT ++// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR ++// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT ++// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, ++// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT ++// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, ++// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY ++// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT ++// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE ++// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. ++ ++// Author: anuraag@google.com (Anuraag Agrawal) ++// Author: tibell@google.com (Johan Tibell) ++ ++#ifndef GOOGLE_PROTOBUF_PYTHON_CPP_REPEATED_SCALAR_CONTAINER_H__ ++#define GOOGLE_PROTOBUF_PYTHON_CPP_REPEATED_SCALAR_CONTAINER_H__ ++ ++#include ++ ++#include ++#ifndef _SHARED_PTR_H ++#include ++#endif ++ ++ ++namespace google { ++namespace protobuf { ++ ++class Message; ++ ++using internal::shared_ptr; ++ ++namespace python { ++ ++struct CFieldDescriptor; ++struct CMessage; ++ ++typedef struct RepeatedScalarContainer { ++ PyObject_HEAD; ++ ++ // This is the top-level C++ Message object that owns the whole ++ // proto tree. Every Python RepeatedScalarContainer holds a ++ // reference to it in order to keep it alive as long as there's a ++ // Python object that references any part of the tree. ++ shared_ptr owner; ++ ++ // Pointer to the C++ Message that contains this container. The ++ // RepeatedScalarContainer does not own this pointer. 
++ Message* message; ++ ++ // Weak reference to a parent CMessage object (i.e. may be NULL.) ++ // ++ // Used to make sure all ancestors are also mutable when first ++ // modifying the container. ++ CMessage* parent; ++ ++ // Weak reference to the parent's descriptor that describes this ++ // field. Used together with the parent's message when making a ++ // default message instance mutable. ++ CFieldDescriptor* parent_field; ++} RepeatedScalarContainer; ++ ++extern PyTypeObject RepeatedScalarContainer_Type; ++ ++namespace repeated_scalar_container { ++ ++// Appends the scalar 'item' to the end of the container 'self'. ++// ++// Returns None if successful; returns NULL and sets an exception if ++// unsuccessful. ++PyObject* Append(RepeatedScalarContainer* self, PyObject* item); ++ ++// Releases the messages in the container to a new message. ++// ++// Returns 0 on success, -1 on failure. ++int Release(RepeatedScalarContainer* self); ++ ++// Appends all the elements in the input iterator to the container. ++// ++// Returns None if successful; returns NULL and sets an exception if ++// unsuccessful. ++PyObject* Extend(RepeatedScalarContainer* self, PyObject* value); ++ ++// Set the owner field of self and any children of self. ++void SetOwner(RepeatedScalarContainer* self, ++ const shared_ptr& new_owner); ++ ++} // namespace repeated_scalar_container ++} // namespace python ++} // namespace protobuf ++ ++} // namespace google ++#endif // GOOGLE_PROTOBUF_PYTHON_CPP_REPEATED_SCALAR_CONTAINER_H__ +--- /dev/null ++++ protobuf-2.6.1/python3/google/protobuf/pyext/scoped_pyobject_ptr.h +@@ -0,0 +1,95 @@ ++// Protocol Buffers - Google's data interchange format ++// Copyright 2008 Google Inc. All rights reserved. 
++// https://developers.google.com/protocol-buffers/ ++// ++// Redistribution and use in source and binary forms, with or without ++// modification, are permitted provided that the following conditions are ++// met: ++// ++// * Redistributions of source code must retain the above copyright ++// notice, this list of conditions and the following disclaimer. ++// * Redistributions in binary form must reproduce the above ++// copyright notice, this list of conditions and the following disclaimer ++// in the documentation and/or other materials provided with the ++// distribution. ++// * Neither the name of Google Inc. nor the names of its ++// contributors may be used to endorse or promote products derived from ++// this software without specific prior written permission. ++// ++// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS ++// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT ++// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR ++// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT ++// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, ++// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT ++// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, ++// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY ++// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT ++// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE ++// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. ++ ++// Author: tibell@google.com (Johan Tibell) ++ ++#ifndef GOOGLE_PROTOBUF_PYTHON_CPP_SCOPED_PYOBJECT_PTR_H__ ++#define GOOGLE_PROTOBUF_PYTHON_CPP_SCOPED_PYOBJECT_PTR_H__ ++ ++#include ++ ++namespace google { ++class ScopedPyObjectPtr { ++ public: ++ // Constructor. Defaults to intializing with NULL. ++ // There is no way to create an uninitialized ScopedPyObjectPtr. 
++ explicit ScopedPyObjectPtr(PyObject* p = NULL) : ptr_(p) { } ++ ++ // Destructor. If there is a PyObject object, delete it. ++ ~ScopedPyObjectPtr() { ++ Py_XDECREF(ptr_); ++ } ++ ++ // Reset. Deletes the current owned object, if any. ++ // Then takes ownership of a new object, if given. ++ // this->reset(this->get()) works. ++ PyObject* reset(PyObject* p = NULL) { ++ if (p != ptr_) { ++ Py_XDECREF(ptr_); ++ ptr_ = p; ++ } ++ return ptr_; ++ } ++ ++ // Releases ownership of the object. ++ PyObject* release() { ++ PyObject* p = ptr_; ++ ptr_ = NULL; ++ return p; ++ } ++ ++ operator PyObject*() { return ptr_; } ++ ++ PyObject* operator->() const { ++ assert(ptr_ != NULL); ++ return ptr_; ++ } ++ ++ PyObject* get() const { return ptr_; } ++ ++ Py_ssize_t refcnt() const { return Py_REFCNT(ptr_); } ++ ++ void inc() const { Py_INCREF(ptr_); } ++ ++ // Comparison operators. ++ // These return whether a ScopedPyObjectPtr and a raw pointer ++ // refer to the same object, not just to two different but equal ++ // objects. ++ bool operator==(const PyObject* p) const { return ptr_ == p; } ++ bool operator!=(const PyObject* p) const { return ptr_ != p; } ++ ++ private: ++ PyObject* ptr_; ++ ++ GOOGLE_DISALLOW_EVIL_CONSTRUCTORS(ScopedPyObjectPtr); ++}; ++ ++} // namespace google ++#endif // GOOGLE_PROTOBUF_PYTHON_CPP_SCOPED_PYOBJECT_PTR_H__ +--- /dev/null ++++ protobuf-2.6.1/python3/google/protobuf/reflection.py +@@ -0,0 +1,205 @@ ++# Protocol Buffers - Google's data interchange format ++# Copyright 2008 Google Inc. All rights reserved. ++# https://developers.google.com/protocol-buffers/ ++# ++# Redistribution and use in source and binary forms, with or without ++# modification, are permitted provided that the following conditions are ++# met: ++# ++# * Redistributions of source code must retain the above copyright ++# notice, this list of conditions and the following disclaimer. 
++# * Redistributions in binary form must reproduce the above ++# copyright notice, this list of conditions and the following disclaimer ++# in the documentation and/or other materials provided with the ++# distribution. ++# * Neither the name of Google Inc. nor the names of its ++# contributors may be used to endorse or promote products derived from ++# this software without specific prior written permission. ++# ++# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS ++# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT ++# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR ++# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT ++# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, ++# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT ++# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, ++# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY ++# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT ++# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE ++# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. ++ ++# This code is meant to work on Python 2.4 and above only. ++ ++"""Contains a metaclass and helper functions used to create ++protocol message classes from Descriptor objects at runtime. ++ ++Recall that a metaclass is the "type" of a class. ++(A class is to a metaclass what an instance is to a class.) ++ ++In this case, we use the GeneratedProtocolMessageType metaclass ++to inject all the useful functionality into the classes ++output by the protocol compiler at compile-time. ++ ++The upshot of all this is that the real implementation ++details for ALL pure-Python protocol buffers are *here in ++this file*. 
++""" ++ ++__author__ = 'robinson@google.com (Will Robinson)' ++ ++ ++from google.protobuf.internal import api_implementation ++from google.protobuf import descriptor as descriptor_mod ++from google.protobuf import message ++ ++_FieldDescriptor = descriptor_mod.FieldDescriptor ++ ++ ++if api_implementation.Type() == 'cpp': ++ if api_implementation.Version() == 2: ++ from google.protobuf.pyext import cpp_message ++ _NewMessage = cpp_message.NewMessage ++ _InitMessage = cpp_message.InitMessage ++ else: ++ from google.protobuf.internal import cpp_message ++ _NewMessage = cpp_message.NewMessage ++ _InitMessage = cpp_message.InitMessage ++else: ++ from google.protobuf.internal import python_message ++ _NewMessage = python_message.NewMessage ++ _InitMessage = python_message.InitMessage ++ ++ ++class GeneratedProtocolMessageType(type): ++ ++ """Metaclass for protocol message classes created at runtime from Descriptors. ++ ++ We add implementations for all methods described in the Message class. We ++ also create properties to allow getting/setting all fields in the protocol ++ message. Finally, we create slots to prevent users from accidentally ++ "setting" nonexistent fields in the protocol message, which then wouldn't get ++ serialized / deserialized properly. ++ ++ The protocol compiler currently uses this metaclass to create protocol ++ message classes at runtime. Clients can also manually create their own ++ classes at runtime, as in this example: ++ ++ mydescriptor = Descriptor(.....) ++ class MyProtoClass(Message): ++ __metaclass__ = GeneratedProtocolMessageType ++ DESCRIPTOR = mydescriptor ++ myproto_instance = MyProtoClass() ++ myproto.foo_field = 23 ++ ... ++ ++ The above example will not work for nested types. If you wish to include them, ++ use reflection.MakeClass() instead of manually instantiating the class in ++ order to create the appropriate class structure. 
++ """ ++ ++ # Must be consistent with the protocol-compiler code in ++ # proto2/compiler/internal/generator.*. ++ _DESCRIPTOR_KEY = 'DESCRIPTOR' ++ ++ def __new__(cls, name, bases, dictionary): ++ """Custom allocation for runtime-generated class types. ++ ++ We override __new__ because this is apparently the only place ++ where we can meaningfully set __slots__ on the class we're creating(?). ++ (The interplay between metaclasses and slots is not very well-documented). ++ ++ Args: ++ name: Name of the class (ignored, but required by the ++ metaclass protocol). ++ bases: Base classes of the class we're constructing. ++ (Should be message.Message). We ignore this field, but ++ it's required by the metaclass protocol ++ dictionary: The class dictionary of the class we're ++ constructing. dictionary[_DESCRIPTOR_KEY] must contain ++ a Descriptor object describing this protocol message ++ type. ++ ++ Returns: ++ Newly-allocated class. ++ """ ++ descriptor = dictionary[GeneratedProtocolMessageType._DESCRIPTOR_KEY] ++ bases = _NewMessage(bases, descriptor, dictionary) ++ superclass = super(GeneratedProtocolMessageType, cls) ++ ++ new_class = superclass.__new__(cls, name, bases, dictionary) ++ setattr(descriptor, '_concrete_class', new_class) ++ return new_class ++ ++ def __init__(cls, name, bases, dictionary): ++ """Here we perform the majority of our work on the class. ++ We add enum getters, an __init__ method, implementations ++ of all Message methods, and properties for all fields ++ in the protocol type. ++ ++ Args: ++ name: Name of the class (ignored, but required by the ++ metaclass protocol). ++ bases: Base classes of the class we're constructing. ++ (Should be message.Message). We ignore this field, but ++ it's required by the metaclass protocol ++ dictionary: The class dictionary of the class we're ++ constructing. dictionary[_DESCRIPTOR_KEY] must contain ++ a Descriptor object describing this protocol message ++ type. 
++ """ ++ descriptor = dictionary[GeneratedProtocolMessageType._DESCRIPTOR_KEY] ++ _InitMessage(descriptor, cls) ++ superclass = super(GeneratedProtocolMessageType, cls) ++ superclass.__init__(name, bases, dictionary) ++ ++ ++def ParseMessage(descriptor, byte_str): ++ """Generate a new Message instance from this Descriptor and a byte string. ++ ++ Args: ++ descriptor: Protobuf Descriptor object ++ byte_str: Serialized protocol buffer byte string ++ ++ Returns: ++ Newly created protobuf Message object. ++ """ ++ result_class = MakeClass(descriptor) ++ new_msg = result_class() ++ new_msg.ParseFromString(byte_str) ++ return new_msg ++ ++ ++def MakeClass(descriptor): ++ """Construct a class object for a protobuf described by descriptor. ++ ++ Composite descriptors are handled by defining the new class as a member of the ++ parent class, recursing as deep as necessary. ++ This is the dynamic equivalent to: ++ ++ class Parent(message.Message): ++ __metaclass__ = GeneratedProtocolMessageType ++ DESCRIPTOR = descriptor ++ class Child(message.Message): ++ __metaclass__ = GeneratedProtocolMessageType ++ DESCRIPTOR = descriptor.nested_types[0] ++ ++ Sample usage: ++ file_descriptor = descriptor_pb2.FileDescriptorProto() ++ file_descriptor.ParseFromString(proto2_string) ++ msg_descriptor = descriptor.MakeDescriptor(file_descriptor.message_type[0]) ++ msg_class = reflection.MakeClass(msg_descriptor) ++ msg = msg_class() ++ ++ Args: ++ descriptor: A descriptor.Descriptor object describing the protobuf. ++ Returns: ++ The Message class object described by the descriptor. 
++ """ ++ attributes = {} ++ for name, nested_type in descriptor.nested_types_by_name.items(): ++ attributes[name] = MakeClass(nested_type) ++ ++ attributes[GeneratedProtocolMessageType._DESCRIPTOR_KEY] = descriptor ++ ++ return GeneratedProtocolMessageType(str(descriptor.name), (message.Message,), ++ attributes) +--- /dev/null ++++ protobuf-2.6.1/python3/google/protobuf/service.py +@@ -0,0 +1,226 @@ ++# Protocol Buffers - Google's data interchange format ++# Copyright 2008 Google Inc. All rights reserved. ++# https://developers.google.com/protocol-buffers/ ++# ++# Redistribution and use in source and binary forms, with or without ++# modification, are permitted provided that the following conditions are ++# met: ++# ++# * Redistributions of source code must retain the above copyright ++# notice, this list of conditions and the following disclaimer. ++# * Redistributions in binary form must reproduce the above ++# copyright notice, this list of conditions and the following disclaimer ++# in the documentation and/or other materials provided with the ++# distribution. ++# * Neither the name of Google Inc. nor the names of its ++# contributors may be used to endorse or promote products derived from ++# this software without specific prior written permission. ++# ++# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS ++# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT ++# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR ++# A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT ++# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, ++# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT ++# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, ++# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY ++# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT ++# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE ++# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. ++ ++"""DEPRECATED: Declares the RPC service interfaces. ++ ++This module declares the abstract interfaces underlying proto2 RPC ++services. These are intended to be independent of any particular RPC ++implementation, so that proto2 services can be used on top of a variety ++of implementations. Starting with version 2.3.0, RPC implementations should ++not try to build on these, but should instead provide code generator plugins ++which generate code specific to the particular RPC implementation. This way ++the generated code can be more appropriate for the implementation in use ++and can avoid unnecessary layers of indirection. ++""" ++ ++__author__ = 'petar@google.com (Petar Petrov)' ++ ++ ++class RpcException(Exception): ++ """Exception raised on failed blocking RPC method call.""" ++ pass ++ ++ ++class Service(object): ++ ++ """Abstract base interface for protocol-buffer-based RPC services. ++ ++ Services themselves are abstract classes (implemented either by servers or as ++ stubs), but they subclass this base interface. The methods of this ++ interface can be used to call the methods of the service without knowing ++ its exact type at compile time (analogous to the Message interface). 
++ """ ++ ++ def GetDescriptor(): ++ """Retrieves this service's descriptor.""" ++ raise NotImplementedError ++ ++ def CallMethod(self, method_descriptor, rpc_controller, ++ request, done): ++ """Calls a method of the service specified by method_descriptor. ++ ++ If "done" is None then the call is blocking and the response ++ message will be returned directly. Otherwise the call is asynchronous ++ and "done" will later be called with the response value. ++ ++ In the blocking case, RpcException will be raised on error. ++ ++ Preconditions: ++ * method_descriptor.service == GetDescriptor ++ * request is of the exact same classes as returned by ++ GetRequestClass(method). ++ * After the call has started, the request must not be modified. ++ * "rpc_controller" is of the correct type for the RPC implementation being ++ used by this Service. For stubs, the "correct type" depends on the ++ RpcChannel which the stub is using. ++ ++ Postconditions: ++ * "done" will be called when the method is complete. This may be ++ before CallMethod() returns or it may be at some point in the future. ++ * If the RPC failed, the response value passed to "done" will be None. ++ Further details about the failure can be found by querying the ++ RpcController. ++ """ ++ raise NotImplementedError ++ ++ def GetRequestClass(self, method_descriptor): ++ """Returns the class of the request message for the specified method. ++ ++ CallMethod() requires that the request is of a particular subclass of ++ Message. GetRequestClass() gets the default instance of this required ++ type. ++ ++ Example: ++ method = service.GetDescriptor().FindMethodByName("Foo") ++ request = stub.GetRequestClass(method)() ++ request.ParseFromString(input) ++ service.CallMethod(method, request, callback) ++ """ ++ raise NotImplementedError ++ ++ def GetResponseClass(self, method_descriptor): ++ """Returns the class of the response message for the specified method. 
++ ++ This method isn't really needed, as the RpcChannel's CallMethod constructs ++ the response protocol message. It's provided anyway in case it is useful ++ for the caller to know the response type in advance. ++ """ ++ raise NotImplementedError ++ ++ ++class RpcController(object): ++ ++ """An RpcController mediates a single method call. ++ ++ The primary purpose of the controller is to provide a way to manipulate ++ settings specific to the RPC implementation and to find out about RPC-level ++ errors. The methods provided by the RpcController interface are intended ++ to be a "least common denominator" set of features which we expect all ++ implementations to support. Specific implementations may provide more ++ advanced features (e.g. deadline propagation). ++ """ ++ ++ # Client-side methods below ++ ++ def Reset(self): ++ """Resets the RpcController to its initial state. ++ ++ After the RpcController has been reset, it may be reused in ++ a new call. Must not be called while an RPC is in progress. ++ """ ++ raise NotImplementedError ++ ++ def Failed(self): ++ """Returns true if the call failed. ++ ++ After a call has finished, returns true if the call failed. The possible ++ reasons for failure depend on the RPC implementation. Failed() must not ++ be called before a call has finished. If Failed() returns true, the ++ contents of the response message are undefined. ++ """ ++ raise NotImplementedError ++ ++ def ErrorText(self): ++ """If Failed is true, returns a human-readable description of the error.""" ++ raise NotImplementedError ++ ++ def StartCancel(self): ++ """Initiate cancellation. ++ ++ Advises the RPC system that the caller desires that the RPC call be ++ canceled. The RPC system may cancel it immediately, may wait awhile and ++ then cancel it, or may not even cancel the call at all. If the call is ++ canceled, the "done" callback will still be called and the RpcController ++ will indicate that the call failed at that time. 
++ """ ++ raise NotImplementedError ++ ++ # Server-side methods below ++ ++ def SetFailed(self, reason): ++ """Sets a failure reason. ++ ++ Causes Failed() to return true on the client side. "reason" will be ++ incorporated into the message returned by ErrorText(). If you find ++ you need to return machine-readable information about failures, you ++ should incorporate it into your response protocol buffer and should ++ NOT call SetFailed(). ++ """ ++ raise NotImplementedError ++ ++ def IsCanceled(self): ++ """Checks if the client cancelled the RPC. ++ ++ If true, indicates that the client canceled the RPC, so the server may ++ as well give up on replying to it. The server should still call the ++ final "done" callback. ++ """ ++ raise NotImplementedError ++ ++ def NotifyOnCancel(self, callback): ++ """Sets a callback to invoke on cancel. ++ ++ Asks that the given callback be called when the RPC is canceled. The ++ callback will always be called exactly once. If the RPC completes without ++ being canceled, the callback will be called after completion. If the RPC ++ has already been canceled when NotifyOnCancel() is called, the callback ++ will be called immediately. ++ ++ NotifyOnCancel() must be called no more than once per request. ++ """ ++ raise NotImplementedError ++ ++ ++class RpcChannel(object): ++ ++ """Abstract interface for an RPC channel. ++ ++ An RpcChannel represents a communication line to a service which can be used ++ to call that service's methods. The service may be running on another ++ machine. Normally, you should not use an RpcChannel directly, but instead ++ construct a stub {@link Service} wrapping it. 
Example: ++ ++ Example: ++ RpcChannel channel = rpcImpl.Channel("remotehost.example.com:1234") ++ RpcController controller = rpcImpl.Controller() ++ MyService service = MyService_Stub(channel) ++ service.MyMethod(controller, request, callback) ++ """ ++ ++ def CallMethod(self, method_descriptor, rpc_controller, ++ request, response_class, done): ++ """Calls the method identified by the descriptor. ++ ++ Call the given method of the remote service. The signature of this ++ procedure looks the same as Service.CallMethod(), but the requirements ++ are less strict in one important way: the request object doesn't have to ++ be of any specific class as long as its descriptor is method.input_type. ++ """ ++ raise NotImplementedError +--- /dev/null ++++ protobuf-2.6.1/python3/google/protobuf/service_reflection.py +@@ -0,0 +1,284 @@ ++# Protocol Buffers - Google's data interchange format ++# Copyright 2008 Google Inc. All rights reserved. ++# https://developers.google.com/protocol-buffers/ ++# ++# Redistribution and use in source and binary forms, with or without ++# modification, are permitted provided that the following conditions are ++# met: ++# ++# * Redistributions of source code must retain the above copyright ++# notice, this list of conditions and the following disclaimer. ++# * Redistributions in binary form must reproduce the above ++# copyright notice, this list of conditions and the following disclaimer ++# in the documentation and/or other materials provided with the ++# distribution. ++# * Neither the name of Google Inc. nor the names of its ++# contributors may be used to endorse or promote products derived from ++# this software without specific prior written permission. ++# ++# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS ++# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT ++# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR ++# A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT ++# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, ++# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT ++# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, ++# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY ++# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT ++# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE ++# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. ++ ++"""Contains metaclasses used to create protocol service and service stub ++classes from ServiceDescriptor objects at runtime. ++ ++The GeneratedServiceType and GeneratedServiceStubType metaclasses are used to ++inject all useful functionality into the classes output by the protocol ++compiler at compile-time. ++""" ++ ++__author__ = 'petar@google.com (Petar Petrov)' ++ ++ ++class GeneratedServiceType(type): ++ ++ """Metaclass for service classes created at runtime from ServiceDescriptors. ++ ++ Implementations for all methods described in the Service class are added here ++ by this class. We also create properties to allow getting/setting all fields ++ in the protocol message. ++ ++ The protocol compiler currently uses this metaclass to create protocol service ++ classes at runtime. Clients can also manually create their own classes at ++ runtime, as in this example: ++ ++ mydescriptor = ServiceDescriptor(.....) ++ class MyProtoService(service.Service): ++ __metaclass__ = GeneratedServiceType ++ DESCRIPTOR = mydescriptor ++ myservice_instance = MyProtoService() ++ ... ++ """ ++ ++ _DESCRIPTOR_KEY = 'DESCRIPTOR' ++ ++ def __init__(cls, name, bases, dictionary): ++ """Creates a message service class. ++ ++ Args: ++ name: Name of the class (ignored, but required by the metaclass ++ protocol). ++ bases: Base classes of the class being constructed. ++ dictionary: The class dictionary of the class being constructed. 
++ dictionary[_DESCRIPTOR_KEY] must contain a ServiceDescriptor object ++ describing this protocol service type. ++ """ ++ # Don't do anything if this class doesn't have a descriptor. This happens ++ # when a service class is subclassed. ++ if GeneratedServiceType._DESCRIPTOR_KEY not in dictionary: ++ return ++ descriptor = dictionary[GeneratedServiceType._DESCRIPTOR_KEY] ++ service_builder = _ServiceBuilder(descriptor) ++ service_builder.BuildService(cls) ++ ++ ++class GeneratedServiceStubType(GeneratedServiceType): ++ ++ """Metaclass for service stubs created at runtime from ServiceDescriptors. ++ ++ This class has similar responsibilities as GeneratedServiceType, except that ++ it creates the service stub classes. ++ """ ++ ++ _DESCRIPTOR_KEY = 'DESCRIPTOR' ++ ++ def __init__(cls, name, bases, dictionary): ++ """Creates a message service stub class. ++ ++ Args: ++ name: Name of the class (ignored, here). ++ bases: Base classes of the class being constructed. ++ dictionary: The class dictionary of the class being constructed. ++ dictionary[_DESCRIPTOR_KEY] must contain a ServiceDescriptor object ++ describing this protocol service type. ++ """ ++ super(GeneratedServiceStubType, cls).__init__(name, bases, dictionary) ++ # Don't do anything if this class doesn't have a descriptor. This happens ++ # when a service stub is subclassed. ++ if GeneratedServiceStubType._DESCRIPTOR_KEY not in dictionary: ++ return ++ descriptor = dictionary[GeneratedServiceStubType._DESCRIPTOR_KEY] ++ service_stub_builder = _ServiceStubBuilder(descriptor) ++ service_stub_builder.BuildServiceStub(cls) ++ ++ ++class _ServiceBuilder(object): ++ ++ """This class constructs a protocol service class using a service descriptor. ++ ++ Given a service descriptor, this class constructs a class that represents ++ the specified service descriptor. One service builder instance constructs ++ exactly one service class. That means all instances of that class share the ++ same builder. 
++ """ ++ ++ def __init__(self, service_descriptor): ++ """Initializes an instance of the service class builder. ++ ++ Args: ++ service_descriptor: ServiceDescriptor to use when constructing the ++ service class. ++ """ ++ self.descriptor = service_descriptor ++ ++ def BuildService(self, cls): ++ """Constructs the service class. ++ ++ Args: ++ cls: The class that will be constructed. ++ """ ++ ++ # CallMethod needs to operate with an instance of the Service class. This ++ # internal wrapper function exists only to be able to pass the service ++ # instance to the method that does the real CallMethod work. ++ def _WrapCallMethod(srvc, method_descriptor, ++ rpc_controller, request, callback): ++ return self._CallMethod(srvc, method_descriptor, ++ rpc_controller, request, callback) ++ self.cls = cls ++ cls.CallMethod = _WrapCallMethod ++ cls.GetDescriptor = staticmethod(lambda: self.descriptor) ++ cls.GetDescriptor.__doc__ = "Returns the service descriptor." ++ cls.GetRequestClass = self._GetRequestClass ++ cls.GetResponseClass = self._GetResponseClass ++ for method in self.descriptor.methods: ++ setattr(cls, method.name, self._GenerateNonImplementedMethod(method)) ++ ++ def _CallMethod(self, srvc, method_descriptor, ++ rpc_controller, request, callback): ++ """Calls the method described by a given method descriptor. ++ ++ Args: ++ srvc: Instance of the service for which this method is called. ++ method_descriptor: Descriptor that represent the method to call. ++ rpc_controller: RPC controller to use for this method's execution. ++ request: Request protocol message. ++ callback: A callback to invoke after the method has completed. 
++ """ ++ if method_descriptor.containing_service != self.descriptor: ++ raise RuntimeError( ++ 'CallMethod() given method descriptor for wrong service type.') ++ method = getattr(srvc, method_descriptor.name) ++ return method(rpc_controller, request, callback) ++ ++ def _GetRequestClass(self, method_descriptor): ++ """Returns the class of the request protocol message. ++ ++ Args: ++ method_descriptor: Descriptor of the method for which to return the ++ request protocol message class. ++ ++ Returns: ++ A class that represents the input protocol message of the specified ++ method. ++ """ ++ if method_descriptor.containing_service != self.descriptor: ++ raise RuntimeError( ++ 'GetRequestClass() given method descriptor for wrong service type.') ++ return method_descriptor.input_type._concrete_class ++ ++ def _GetResponseClass(self, method_descriptor): ++ """Returns the class of the response protocol message. ++ ++ Args: ++ method_descriptor: Descriptor of the method for which to return the ++ response protocol message class. ++ ++ Returns: ++ A class that represents the output protocol message of the specified ++ method. ++ """ ++ if method_descriptor.containing_service != self.descriptor: ++ raise RuntimeError( ++ 'GetResponseClass() given method descriptor for wrong service type.') ++ return method_descriptor.output_type._concrete_class ++ ++ def _GenerateNonImplementedMethod(self, method): ++ """Generates and returns a method that can be set for a service methods. ++ ++ Args: ++ method: Descriptor of the service method for which a method is to be ++ generated. ++ ++ Returns: ++ A method that can be added to the service class. ++ """ ++ return lambda inst, rpc_controller, request, callback: ( ++ self._NonImplementedMethod(method.name, rpc_controller, callback)) ++ ++ def _NonImplementedMethod(self, method_name, rpc_controller, callback): ++ """The body of all methods in the generated service class. ++ ++ Args: ++ method_name: Name of the method being executed. 
++ rpc_controller: RPC controller used to execute this method. ++ callback: A callback which will be invoked when the method finishes. ++ """ ++ rpc_controller.SetFailed('Method %s not implemented.' % method_name) ++ callback(None) ++ ++ ++class _ServiceStubBuilder(object): ++ ++ """Constructs a protocol service stub class using a service descriptor. ++ ++ Given a service descriptor, this class constructs a suitable stub class. ++ A stub is just a type-safe wrapper around an RpcChannel which emulates a ++ local implementation of the service. ++ ++ One service stub builder instance constructs exactly one class. It means all ++ instances of that class share the same service stub builder. ++ """ ++ ++ def __init__(self, service_descriptor): ++ """Initializes an instance of the service stub class builder. ++ ++ Args: ++ service_descriptor: ServiceDescriptor to use when constructing the ++ stub class. ++ """ ++ self.descriptor = service_descriptor ++ ++ def BuildServiceStub(self, cls): ++ """Constructs the stub class. ++ ++ Args: ++ cls: The class that will be constructed. ++ """ ++ ++ def _ServiceStubInit(stub, rpc_channel): ++ stub.rpc_channel = rpc_channel ++ self.cls = cls ++ cls.__init__ = _ServiceStubInit ++ for method in self.descriptor.methods: ++ setattr(cls, method.name, self._GenerateStubMethod(method)) ++ ++ def _GenerateStubMethod(self, method): ++ return (lambda inst, rpc_controller, request, callback=None: ++ self._StubMethod(inst, method, rpc_controller, request, callback)) ++ ++ def _StubMethod(self, stub, method_descriptor, ++ rpc_controller, request, callback): ++ """The body of all service methods in the generated stub class. ++ ++ Args: ++ stub: Stub instance. ++ method_descriptor: Descriptor of the invoked method. ++ rpc_controller: Rpc controller to execute the method. ++ request: Request protocol message. ++ callback: A callback to execute when the method finishes. ++ Returns: ++ Response message (in case of blocking call). 
++ """ ++ return stub.rpc_channel.CallMethod( ++ method_descriptor, rpc_controller, request, ++ method_descriptor.output_type._concrete_class, callback) +--- /dev/null ++++ protobuf-2.6.1/python3/google/protobuf/symbol_database.py +@@ -0,0 +1,185 @@ ++# Protocol Buffers - Google's data interchange format ++# Copyright 2008 Google Inc. All rights reserved. ++# https://developers.google.com/protocol-buffers/ ++# ++# Redistribution and use in source and binary forms, with or without ++# modification, are permitted provided that the following conditions are ++# met: ++# ++# * Redistributions of source code must retain the above copyright ++# notice, this list of conditions and the following disclaimer. ++# * Redistributions in binary form must reproduce the above ++# copyright notice, this list of conditions and the following disclaimer ++# in the documentation and/or other materials provided with the ++# distribution. ++# * Neither the name of Google Inc. nor the names of its ++# contributors may be used to endorse or promote products derived from ++# this software without specific prior written permission. ++# ++# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS ++# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT ++# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR ++# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT ++# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, ++# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT ++# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, ++# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY ++# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT ++# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE ++# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. ++ ++"""A database of Python protocol buffer generated symbols. 
++ ++SymbolDatabase makes it easy to create new instances of a registered type, given ++only the type's protocol buffer symbol name. Once all symbols are registered, ++they can be accessed using either the MessageFactory interface which ++SymbolDatabase exposes, or the DescriptorPool interface of the underlying ++pool. ++ ++Example usage: ++ ++ db = symbol_database.SymbolDatabase() ++ ++ # Register symbols of interest, from one or multiple files. ++ db.RegisterFileDescriptor(my_proto_pb2.DESCRIPTOR) ++ db.RegisterMessage(my_proto_pb2.MyMessage) ++ db.RegisterEnumDescriptor(my_proto_pb2.MyEnum.DESCRIPTOR) ++ ++ # The database can be used as a MessageFactory, to generate types based on ++ # their name: ++ types = db.GetMessages(['my_proto.proto']) ++ my_message_instance = types['MyMessage']() ++ ++ # The database's underlying descriptor pool can be queried, so it's not ++ # necessary to know a type's filename to be able to generate it: ++ filename = db.pool.FindFileContainingSymbol('MyMessage') ++ my_message_instance = db.GetMessages([filename])['MyMessage']() ++ ++ # This functionality is also provided directly via a convenience method: ++ my_message_instance = db.GetSymbol('MyMessage')() ++""" ++ ++ ++from google.protobuf import descriptor_pool ++ ++ ++class SymbolDatabase(object): ++ """A database of Python generated symbols. ++ ++ SymbolDatabase also models message_factory.MessageFactory. ++ ++ The symbol database can be used to keep a global registry of all protocol ++ buffer types used within a program. ++ """ ++ ++ def __init__(self): ++ """Constructor.""" ++ ++ self._symbols = {} ++ self._symbols_by_file = {} ++ self.pool = descriptor_pool.DescriptorPool() ++ ++ def RegisterMessage(self, message): ++ """Registers the given message type in the local database. ++ ++ Args: ++ message: a message.Message, to be registered. ++ ++ Returns: ++ The provided message. 
++ """ ++ ++ desc = message.DESCRIPTOR ++ self._symbols[desc.full_name] = message ++ if desc.file.name not in self._symbols_by_file: ++ self._symbols_by_file[desc.file.name] = {} ++ self._symbols_by_file[desc.file.name][desc.full_name] = message ++ self.pool.AddDescriptor(desc) ++ return message ++ ++ def RegisterEnumDescriptor(self, enum_descriptor): ++ """Registers the given enum descriptor in the local database. ++ ++ Args: ++ enum_descriptor: a descriptor.EnumDescriptor. ++ ++ Returns: ++ The provided descriptor. ++ """ ++ self.pool.AddEnumDescriptor(enum_descriptor) ++ return enum_descriptor ++ ++ def RegisterFileDescriptor(self, file_descriptor): ++ """Registers the given file descriptor in the local database. ++ ++ Args: ++ file_descriptor: a descriptor.FileDescriptor. ++ ++ Returns: ++ The provided descriptor. ++ """ ++ self.pool.AddFileDescriptor(file_descriptor) ++ ++ def GetSymbol(self, symbol): ++ """Tries to find a symbol in the local database. ++ ++ Currently, this method only returns message.Message instances, however, if ++ may be extended in future to support other symbol types. ++ ++ Args: ++ symbol: A str, a protocol buffer symbol. ++ ++ Returns: ++ A Python class corresponding to the symbol. ++ ++ Raises: ++ KeyError: if the symbol could not be found. ++ """ ++ ++ return self._symbols[symbol] ++ ++ def GetPrototype(self, descriptor): ++ """Builds a proto2 message class based on the passed in descriptor. ++ ++ Passing a descriptor with a fully qualified name matching a previous ++ invocation will cause the same class to be returned. ++ ++ Args: ++ descriptor: The descriptor to build from. ++ ++ Returns: ++ A class describing the passed in descriptor. ++ """ ++ ++ return self.GetSymbol(descriptor.full_name) ++ ++ def GetMessages(self, files): ++ """Gets all the messages from a specified file. ++ ++ This will find and resolve dependencies, failing if they are not registered ++ in the symbol database. 
++ ++ ++ Args: ++ files: The file names to extract messages from. ++ ++ Returns: ++ A dictionary mapping proto names to the message classes. This will include ++ any dependent messages as well as any messages defined in the same file as ++ a specified message. ++ ++ Raises: ++ KeyError: if a file could not be found. ++ """ ++ ++ result = {} ++ for f in files: ++ result.update(self._symbols_by_file[f]) ++ return result ++ ++_DEFAULT = SymbolDatabase() ++ ++ ++def Default(): ++ """Returns the default SymbolDatabase.""" ++ return _DEFAULT +--- /dev/null ++++ protobuf-2.6.1/python3/google/protobuf/text_encoding.py +@@ -0,0 +1,106 @@ ++# Protocol Buffers - Google's data interchange format ++# Copyright 2008 Google Inc. All rights reserved. ++# https://developers.google.com/protocol-buffers/ ++# ++# Redistribution and use in source and binary forms, with or without ++# modification, are permitted provided that the following conditions are ++# met: ++# ++# * Redistributions of source code must retain the above copyright ++# notice, this list of conditions and the following disclaimer. ++# * Redistributions in binary form must reproduce the above ++# copyright notice, this list of conditions and the following disclaimer ++# in the documentation and/or other materials provided with the ++# distribution. ++# * Neither the name of Google Inc. nor the names of its ++# contributors may be used to endorse or promote products derived from ++# this software without specific prior written permission. ++# ++# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS ++# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT ++# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR ++# A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT ++# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, ++# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT ++# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, ++# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY ++# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT ++# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE ++# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. ++"""Encoding related utilities.""" ++import re ++ ++import six ++ ++# Lookup table for utf8 ++_cescape_utf8_to_str = [chr(i) for i in range(0, 256)] ++_cescape_utf8_to_str[9] = r'\t' # optional escape ++_cescape_utf8_to_str[10] = r'\n' # optional escape ++_cescape_utf8_to_str[13] = r'\r' # optional escape ++_cescape_utf8_to_str[39] = r"\'" # optional escape ++ ++_cescape_utf8_to_str[34] = r'\"' # necessary escape ++_cescape_utf8_to_str[92] = r'\\' # necessary escape ++ ++# Lookup table for non-utf8, with necessary escapes at (o >= 127 or o < 32) ++_cescape_byte_to_str = ([r'\%03o' % i for i in range(0, 32)] + ++ [chr(i) for i in range(32, 127)] + ++ [r'\%03o' % i for i in range(127, 256)]) ++_cescape_byte_to_str[9] = r'\t' # optional escape ++_cescape_byte_to_str[10] = r'\n' # optional escape ++_cescape_byte_to_str[13] = r'\r' # optional escape ++_cescape_byte_to_str[39] = r"\'" # optional escape ++ ++_cescape_byte_to_str[34] = r'\"' # necessary escape ++_cescape_byte_to_str[92] = r'\\' # necessary escape ++ ++ ++def CEscape(text, as_utf8): ++ """Escape a bytes string for use in an ascii protocol buffer. ++ ++ text.encode('string_escape') does not seem to satisfy our needs as it ++ encodes unprintable characters using two-digit hex escapes whereas our ++ C++ unescaping function allows hex escapes to be any length. 
So, ++ "\0011".encode('string_escape') ends up being "\\x011", which will be ++ decoded in C++ as a single-character string with char code 0x11. ++ ++ Args: ++ text: A byte string to be escaped ++ as_utf8: Specifies if result should be returned in UTF-8 encoding ++ Returns: ++ Escaped string ++ """ ++ # PY3 hack: make Ord work for str and bytes: ++ # //platforms/networking/data uses unicode here, hence basestring. ++ Ord = ord if isinstance(text, six.string_types) else lambda x: x ++ if as_utf8: ++ return ''.join(_cescape_utf8_to_str[Ord(c)] for c in text) ++ return ''.join(_cescape_byte_to_str[Ord(c)] for c in text) ++ ++ ++_CUNESCAPE_HEX = re.compile(r'(\\+)x([0-9a-fA-F])(?![0-9a-fA-F])') ++_cescape_highbit_to_str = ([chr(i) for i in range(0, 127)] + ++ [r'\%03o' % i for i in range(127, 256)]) ++ ++ ++def CUnescape(text): ++ """Unescape a text string with C-style escape sequences to UTF-8 bytes.""" ++ ++ def ReplaceHex(m): ++ # Only replace the match if the number of leading back slashes is odd. i.e. ++ # the slash itself is not escaped. ++ if len(m.group(1)) & 1: ++ return m.group(1) + 'x0' + m.group(2) ++ return m.group(0) ++ ++ # This is required because the 'string_escape' encoding doesn't ++ # allow single-digit hex escapes (like '\xf'). ++ result = _CUNESCAPE_HEX.sub(ReplaceHex, text) ++ ++ if str is bytes: # PY2 ++ return result.decode('string_escape') ++ result = ''.join(_cescape_highbit_to_str[ord(c)] for c in result) ++ return (result.encode('ascii') # Make it bytes to allow decode. ++ .decode('unicode_escape') ++ # Make it bytes again to return the proper type. ++ .encode('raw_unicode_escape')) +--- /dev/null ++++ protobuf-2.6.1/python3/google/protobuf/text_format.py +@@ -0,0 +1,875 @@ ++# Protocol Buffers - Google's data interchange format ++# Copyright 2008 Google Inc. All rights reserved. 
++# https://developers.google.com/protocol-buffers/ ++# ++# Redistribution and use in source and binary forms, with or without ++# modification, are permitted provided that the following conditions are ++# met: ++# ++# * Redistributions of source code must retain the above copyright ++# notice, this list of conditions and the following disclaimer. ++# * Redistributions in binary form must reproduce the above ++# copyright notice, this list of conditions and the following disclaimer ++# in the documentation and/or other materials provided with the ++# distribution. ++# * Neither the name of Google Inc. nor the names of its ++# contributors may be used to endorse or promote products derived from ++# this software without specific prior written permission. ++# ++# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS ++# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT ++# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR ++# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT ++# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, ++# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT ++# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, ++# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY ++# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT ++# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE ++# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. ++ ++# Copyright 2007 Google Inc. All Rights Reserved. 
++ ++"""Contains routines for printing protocol messages in text format.""" ++ ++__author__ = 'kenton@google.com (Kenton Varda)' ++ ++import io ++import re ++ ++import six ++ ++if six.PY3: ++ long = int ++ ++from google.protobuf.internal import type_checkers ++from google.protobuf import descriptor ++from google.protobuf import text_encoding ++ ++__all__ = ['MessageToString', 'PrintMessage', 'PrintField', ++ 'PrintFieldValue', 'Merge'] ++ ++ ++_INTEGER_CHECKERS = (type_checkers.Uint32ValueChecker(), ++ type_checkers.Int32ValueChecker(), ++ type_checkers.Uint64ValueChecker(), ++ type_checkers.Int64ValueChecker()) ++_FLOAT_INFINITY = re.compile('-?inf(?:inity)?f?', re.IGNORECASE) ++_FLOAT_NAN = re.compile('nanf?', re.IGNORECASE) ++_FLOAT_TYPES = frozenset([descriptor.FieldDescriptor.CPPTYPE_FLOAT, ++ descriptor.FieldDescriptor.CPPTYPE_DOUBLE]) ++ ++ ++class Error(Exception): ++ """Top-level module error for text_format.""" ++ ++ ++class ParseError(Error): ++ """Thrown in case of ASCII parsing error.""" ++ ++ ++def MessageToString(message, as_utf8=False, as_one_line=False, ++ pointy_brackets=False, use_index_order=False, ++ float_format=None): ++ """Convert protobuf message to text format. ++ ++ Floating point values can be formatted compactly with 15 digits of ++ precision (which is the most that IEEE 754 "double" can guarantee) ++ using float_format='.15g'. ++ ++ Args: ++ message: The protocol buffers message. ++ as_utf8: Produce text output in UTF8 format. ++ as_one_line: Don't introduce newlines between fields. ++ pointy_brackets: If True, use angle brackets instead of curly braces for ++ nesting. ++ use_index_order: If True, print fields of a proto message using the order ++ defined in source code instead of the field number. By default, use the ++ field number order. ++ float_format: If set, use this to specify floating point number formatting ++ (per the "Format Specification Mini-Language"); otherwise, str() is used. 
++ ++ Returns: ++ A string of the text formatted protocol buffer message. ++ """ ++ out = io.BytesIO() ++ PrintMessage(message, out, as_utf8=as_utf8, as_one_line=as_one_line, ++ pointy_brackets=pointy_brackets, ++ use_index_order=use_index_order, ++ float_format=float_format) ++ result = out.getvalue() ++ out.close() ++ if as_one_line: ++ return result.rstrip() ++ return result ++ ++ ++def PrintMessage(message, out, indent=0, as_utf8=False, as_one_line=False, ++ pointy_brackets=False, use_index_order=False, ++ float_format=None): ++ fields = message.ListFields() ++ if use_index_order: ++ fields.sort(key=lambda x: x[0].index) ++ for field, value in fields: ++ if field.label == descriptor.FieldDescriptor.LABEL_REPEATED: ++ for element in value: ++ PrintField(field, element, out, indent, as_utf8, as_one_line, ++ pointy_brackets=pointy_brackets, ++ float_format=float_format) ++ else: ++ PrintField(field, value, out, indent, as_utf8, as_one_line, ++ pointy_brackets=pointy_brackets, ++ float_format=float_format) ++ ++ ++def PrintField(field, value, out, indent=0, as_utf8=False, as_one_line=False, ++ pointy_brackets=False, float_format=None): ++ """Print a single field name/value pair. For repeated fields, the value ++ should be a single element.""" ++ ++ out.write(' ' * indent) ++ if field.is_extension: ++ out.write('[') ++ if (field.containing_type.GetOptions().message_set_wire_format and ++ field.type == descriptor.FieldDescriptor.TYPE_MESSAGE and ++ field.message_type == field.extension_scope and ++ field.label == descriptor.FieldDescriptor.LABEL_OPTIONAL): ++ out.write(field.message_type.full_name) ++ else: ++ out.write(field.full_name) ++ out.write(']') ++ elif field.type == descriptor.FieldDescriptor.TYPE_GROUP: ++ # For groups, use the capitalized name. 
++ out.write(field.message_type.name) ++ else: ++ out.write(field.name) ++ ++ if field.cpp_type != descriptor.FieldDescriptor.CPPTYPE_MESSAGE: ++ # The colon is optional in this case, but our cross-language golden files ++ # don't include it. ++ out.write(': ') ++ ++ PrintFieldValue(field, value, out, indent, as_utf8, as_one_line, ++ pointy_brackets=pointy_brackets, ++ float_format=float_format) ++ if as_one_line: ++ out.write(' ') ++ else: ++ out.write('\n') ++ ++ ++def PrintFieldValue(field, value, out, indent=0, as_utf8=False, ++ as_one_line=False, pointy_brackets=False, ++ float_format=None): ++ """Print a single field value (not including name). For repeated fields, ++ the value should be a single element.""" ++ ++ if pointy_brackets: ++ openb = '<' ++ closeb = '>' ++ else: ++ openb = '{' ++ closeb = '}' ++ ++ if field.cpp_type == descriptor.FieldDescriptor.CPPTYPE_MESSAGE: ++ if as_one_line: ++ out.write(' %s ' % openb) ++ PrintMessage(value, out, indent, as_utf8, as_one_line, ++ pointy_brackets=pointy_brackets, ++ float_format=float_format) ++ out.write(closeb) ++ else: ++ out.write(' %s\n' % openb) ++ PrintMessage(value, out, indent + 2, as_utf8, as_one_line, ++ pointy_brackets=pointy_brackets, ++ float_format=float_format) ++ out.write(' ' * indent + closeb) ++ elif field.cpp_type == descriptor.FieldDescriptor.CPPTYPE_ENUM: ++ enum_value = field.enum_type.values_by_number.get(value, None) ++ if enum_value is not None: ++ out.write(enum_value.name) ++ else: ++ out.write(str(value)) ++ elif field.cpp_type == descriptor.FieldDescriptor.CPPTYPE_STRING: ++ out.write('\"') ++ if isinstance(value, six.text_type): ++ out_value = value.encode('utf-8') ++ else: ++ out_value = value ++ if field.type == descriptor.FieldDescriptor.TYPE_BYTES: ++ # We need to escape non-UTF8 chars in TYPE_BYTES field. 
++ out_as_utf8 = False ++ else: ++ out_as_utf8 = as_utf8 ++ out.write(text_encoding.CEscape(out_value, out_as_utf8)) ++ out.write('\"') ++ elif field.cpp_type == descriptor.FieldDescriptor.CPPTYPE_BOOL: ++ if value: ++ out.write('true') ++ else: ++ out.write('false') ++ elif field.cpp_type in _FLOAT_TYPES and float_format is not None: ++ out.write('{1:{0}}'.format(float_format, value)) ++ else: ++ out.write(str(value)) ++ ++ ++def _ParseOrMerge(lines, message, allow_multiple_scalars): ++ """Converts an ASCII representation of a protocol message into a message. ++ ++ Args: ++ lines: Lines of a message's ASCII representation. ++ message: A protocol buffer message to merge into. ++ allow_multiple_scalars: Determines if repeated values for a non-repeated ++ field are permitted, e.g., the string "foo: 1 foo: 2" for a ++ required/optional field named "foo". ++ ++ Raises: ++ ParseError: On ASCII parsing problems. ++ """ ++ tokenizer = _Tokenizer(lines) ++ while not tokenizer.AtEnd(): ++ _MergeField(tokenizer, message, allow_multiple_scalars) ++ ++ ++def Parse(text, message): ++ """Parses an ASCII representation of a protocol message into a message. ++ ++ Args: ++ text: Message ASCII representation. ++ message: A protocol buffer message to merge into. ++ ++ Returns: ++ The same message passed as argument. ++ ++ Raises: ++ ParseError: On ASCII parsing problems. ++ """ ++ if not isinstance(text, str): text = text.decode('utf-8') ++ return ParseLines(text.split('\n'), message) ++ ++ ++def Merge(text, message): ++ """Parses an ASCII representation of a protocol message into a message. ++ ++ Like Parse(), but allows repeated values for a non-repeated field, and uses ++ the last one. ++ ++ Args: ++ text: Message ASCII representation. ++ message: A protocol buffer message to merge into. ++ ++ Returns: ++ The same message passed as argument. ++ ++ Raises: ++ ParseError: On ASCII parsing problems. 
++ """ ++ return MergeLines(text.split('\n'), message) ++ ++ ++def ParseLines(lines, message): ++ """Parses an ASCII representation of a protocol message into a message. ++ ++ Args: ++ lines: An iterable of lines of a message's ASCII representation. ++ message: A protocol buffer message to merge into. ++ ++ Returns: ++ The same message passed as argument. ++ ++ Raises: ++ ParseError: On ASCII parsing problems. ++ """ ++ _ParseOrMerge(lines, message, False) ++ return message ++ ++ ++def MergeLines(lines, message): ++ """Parses an ASCII representation of a protocol message into a message. ++ ++ Args: ++ lines: An iterable of lines of a message's ASCII representation. ++ message: A protocol buffer message to merge into. ++ ++ Returns: ++ The same message passed as argument. ++ ++ Raises: ++ ParseError: On ASCII parsing problems. ++ """ ++ _ParseOrMerge(lines, message, True) ++ return message ++ ++ ++def _MergeField(tokenizer, message, allow_multiple_scalars): ++ """Merges a single protocol message field into a message. ++ ++ Args: ++ tokenizer: A tokenizer to parse the field name and values. ++ message: A protocol message to record the data. ++ allow_multiple_scalars: Determines if repeated values for a non-repeated ++ field are permitted, e.g., the string "foo: 1 foo: 2" for a ++ required/optional field named "foo". ++ ++ Raises: ++ ParseError: In case of ASCII parsing problems. ++ """ ++ message_descriptor = message.DESCRIPTOR ++ if tokenizer.TryConsume('['): ++ name = [tokenizer.ConsumeIdentifier()] ++ while tokenizer.TryConsume('.'): ++ name.append(tokenizer.ConsumeIdentifier()) ++ name = '.'.join(name) ++ ++ if not message_descriptor.is_extendable: ++ raise tokenizer.ParseErrorPreviousToken( ++ 'Message type "%s" does not have extensions.' 
% ++ message_descriptor.full_name) ++ # pylint: disable=protected-access ++ field = message.Extensions._FindExtensionByName(name) ++ # pylint: enable=protected-access ++ if not field: ++ raise tokenizer.ParseErrorPreviousToken( ++ 'Extension "%s" not registered.' % name) ++ elif message_descriptor != field.containing_type: ++ raise tokenizer.ParseErrorPreviousToken( ++ 'Extension "%s" does not extend message type "%s".' % ( ++ name, message_descriptor.full_name)) ++ tokenizer.Consume(']') ++ else: ++ name = tokenizer.ConsumeIdentifier() ++ field = message_descriptor.fields_by_name.get(name, None) ++ ++ # Group names are expected to be capitalized as they appear in the ++ # .proto file, which actually matches their type names, not their field ++ # names. ++ if not field: ++ field = message_descriptor.fields_by_name.get(name.lower(), None) ++ if field and field.type != descriptor.FieldDescriptor.TYPE_GROUP: ++ field = None ++ ++ if (field and field.type == descriptor.FieldDescriptor.TYPE_GROUP and ++ field.message_type.name != name): ++ field = None ++ ++ if not field: ++ raise tokenizer.ParseErrorPreviousToken( ++ 'Message type "%s" has no field named "%s".' % ( ++ message_descriptor.full_name, name)) ++ ++ if field.cpp_type == descriptor.FieldDescriptor.CPPTYPE_MESSAGE: ++ tokenizer.TryConsume(':') ++ ++ if tokenizer.TryConsume('<'): ++ end_token = '>' ++ else: ++ tokenizer.Consume('{') ++ end_token = '}' ++ ++ if field.label == descriptor.FieldDescriptor.LABEL_REPEATED: ++ if field.is_extension: ++ sub_message = message.Extensions[field].add() ++ else: ++ sub_message = getattr(message, field.name).add() ++ else: ++ if field.is_extension: ++ sub_message = message.Extensions[field] ++ else: ++ sub_message = getattr(message, field.name) ++ sub_message.SetInParent() ++ ++ while not tokenizer.TryConsume(end_token): ++ if tokenizer.AtEnd(): ++ raise tokenizer.ParseErrorPreviousToken('Expected "%s".' 
% (end_token)) ++ _MergeField(tokenizer, sub_message, allow_multiple_scalars) ++ else: ++ _MergeScalarField(tokenizer, message, field, allow_multiple_scalars) ++ ++ # For historical reasons, fields may optionally be separated by commas or ++ # semicolons. ++ if not tokenizer.TryConsume(','): ++ tokenizer.TryConsume(';') ++ ++ ++def _MergeScalarField(tokenizer, message, field, allow_multiple_scalars): ++ """Merges a single protocol message scalar field into a message. ++ ++ Args: ++ tokenizer: A tokenizer to parse the field value. ++ message: A protocol message to record the data. ++ field: The descriptor of the field to be merged. ++ allow_multiple_scalars: Determines if repeated values for a non-repeated ++ field are permitted, e.g., the string "foo: 1 foo: 2" for a ++ required/optional field named "foo". ++ ++ Raises: ++ ParseError: In case of ASCII parsing problems. ++ RuntimeError: On runtime errors. ++ """ ++ tokenizer.Consume(':') ++ value = None ++ ++ if field.type in (descriptor.FieldDescriptor.TYPE_INT32, ++ descriptor.FieldDescriptor.TYPE_SINT32, ++ descriptor.FieldDescriptor.TYPE_SFIXED32): ++ value = tokenizer.ConsumeInt32() ++ elif field.type in (descriptor.FieldDescriptor.TYPE_INT64, ++ descriptor.FieldDescriptor.TYPE_SINT64, ++ descriptor.FieldDescriptor.TYPE_SFIXED64): ++ value = tokenizer.ConsumeInt64() ++ elif field.type in (descriptor.FieldDescriptor.TYPE_UINT32, ++ descriptor.FieldDescriptor.TYPE_FIXED32): ++ value = tokenizer.ConsumeUint32() ++ elif field.type in (descriptor.FieldDescriptor.TYPE_UINT64, ++ descriptor.FieldDescriptor.TYPE_FIXED64): ++ value = tokenizer.ConsumeUint64() ++ elif field.type in (descriptor.FieldDescriptor.TYPE_FLOAT, ++ descriptor.FieldDescriptor.TYPE_DOUBLE): ++ value = tokenizer.ConsumeFloat() ++ elif field.type == descriptor.FieldDescriptor.TYPE_BOOL: ++ value = tokenizer.ConsumeBool() ++ elif field.type == descriptor.FieldDescriptor.TYPE_STRING: ++ value = tokenizer.ConsumeString() ++ elif field.type == 
descriptor.FieldDescriptor.TYPE_BYTES: ++ value = tokenizer.ConsumeByteString() ++ elif field.type == descriptor.FieldDescriptor.TYPE_ENUM: ++ value = tokenizer.ConsumeEnum(field) ++ else: ++ raise RuntimeError('Unknown field type %d' % field.type) ++ ++ if field.label == descriptor.FieldDescriptor.LABEL_REPEATED: ++ if field.is_extension: ++ message.Extensions[field].append(value) ++ else: ++ getattr(message, field.name).append(value) ++ else: ++ if field.is_extension: ++ if not allow_multiple_scalars and message.HasExtension(field): ++ raise tokenizer.ParseErrorPreviousToken( ++ 'Message type "%s" should not have multiple "%s" extensions.' % ++ (message.DESCRIPTOR.full_name, field.full_name)) ++ else: ++ message.Extensions[field] = value ++ else: ++ if not allow_multiple_scalars and message.HasField(field.name): ++ raise tokenizer.ParseErrorPreviousToken( ++ 'Message type "%s" should not have multiple "%s" fields.' % ++ (message.DESCRIPTOR.full_name, field.name)) ++ else: ++ setattr(message, field.name, value) ++ ++ ++class _Tokenizer(object): ++ """Protocol buffer ASCII representation tokenizer. ++ ++ This class handles the lower level string parsing by splitting it into ++ meaningful tokens. ++ ++ It was directly ported from the Java protocol buffer API. 
++ """ ++ ++ _WHITESPACE = re.compile('(\\s|(#.*$))+', re.MULTILINE) ++ _TOKEN = re.compile( ++ '[a-zA-Z_][0-9a-zA-Z_+-]*|' # an identifier ++ '[0-9+-][0-9a-zA-Z_.+-]*|' # a number ++ '\"([^\"\n\\\\]|\\\\.)*(\"|\\\\?$)|' # a double-quoted string ++ '\'([^\'\n\\\\]|\\\\.)*(\'|\\\\?$)') # a single-quoted string ++ _IDENTIFIER = re.compile(r'\w+') ++ ++ def __init__(self, lines): ++ self._position = 0 ++ self._line = -1 ++ self._column = 0 ++ self._token_start = None ++ self.token = '' ++ self._lines = iter(lines) ++ self._current_line = '' ++ self._previous_line = 0 ++ self._previous_column = 0 ++ self._more_lines = True ++ self._SkipWhitespace() ++ self.NextToken() ++ ++ def AtEnd(self): ++ """Checks the end of the text was reached. ++ ++ Returns: ++ True iff the end was reached. ++ """ ++ return not self.token ++ ++ def _PopLine(self): ++ while len(self._current_line) <= self._column: ++ try: ++ self._current_line = next(self._lines) ++ except StopIteration: ++ self._current_line = '' ++ self._more_lines = False ++ return ++ else: ++ self._line += 1 ++ self._column = 0 ++ ++ def _SkipWhitespace(self): ++ while True: ++ self._PopLine() ++ match = self._WHITESPACE.match(self._current_line, self._column) ++ if not match: ++ break ++ length = len(match.group(0)) ++ self._column += length ++ ++ def TryConsume(self, token): ++ """Tries to consume a given piece of text. ++ ++ Args: ++ token: Text to consume. ++ ++ Returns: ++ True iff the text was consumed. ++ """ ++ if self.token == token: ++ self.NextToken() ++ return True ++ return False ++ ++ def Consume(self, token): ++ """Consumes a piece of text. ++ ++ Args: ++ token: Text to consume. ++ ++ Raises: ++ ParseError: If the text couldn't be consumed. ++ """ ++ if not self.TryConsume(token): ++ raise self._ParseError('Expected "%s".' % token) ++ ++ def ConsumeIdentifier(self): ++ """Consumes protocol message field identifier. ++ ++ Returns: ++ Identifier string. 
++ ++ Raises: ++ ParseError: If an identifier couldn't be consumed. ++ """ ++ result = self.token ++ if not self._IDENTIFIER.match(result): ++ raise self._ParseError('Expected identifier.') ++ self.NextToken() ++ return result ++ ++ def ConsumeInt32(self): ++ """Consumes a signed 32bit integer number. ++ ++ Returns: ++ The integer parsed. ++ ++ Raises: ++ ParseError: If a signed 32bit integer couldn't be consumed. ++ """ ++ try: ++ result = ParseInteger(self.token, is_signed=True, is_long=False) ++ except ValueError as e: ++ raise self._ParseError(str(e)) ++ self.NextToken() ++ return result ++ ++ def ConsumeUint32(self): ++ """Consumes an unsigned 32bit integer number. ++ ++ Returns: ++ The integer parsed. ++ ++ Raises: ++ ParseError: If an unsigned 32bit integer couldn't be consumed. ++ """ ++ try: ++ result = ParseInteger(self.token, is_signed=False, is_long=False) ++ except ValueError as e: ++ raise self._ParseError(str(e)) ++ self.NextToken() ++ return result ++ ++ def ConsumeInt64(self): ++ """Consumes a signed 64bit integer number. ++ ++ Returns: ++ The integer parsed. ++ ++ Raises: ++ ParseError: If a signed 64bit integer couldn't be consumed. ++ """ ++ try: ++ result = ParseInteger(self.token, is_signed=True, is_long=True) ++ except ValueError as e: ++ raise self._ParseError(str(e)) ++ self.NextToken() ++ return result ++ ++ def ConsumeUint64(self): ++ """Consumes an unsigned 64bit integer number. ++ ++ Returns: ++ The integer parsed. ++ ++ Raises: ++ ParseError: If an unsigned 64bit integer couldn't be consumed. ++ """ ++ try: ++ result = ParseInteger(self.token, is_signed=False, is_long=True) ++ except ValueError as e: ++ raise self._ParseError(str(e)) ++ self.NextToken() ++ return result ++ ++ def ConsumeFloat(self): ++ """Consumes an floating point number. ++ ++ Returns: ++ The number parsed. ++ ++ Raises: ++ ParseError: If a floating point number couldn't be consumed. 
++ """ ++ try: ++ result = ParseFloat(self.token) ++ except ValueError as e: ++ raise self._ParseError(str(e)) ++ self.NextToken() ++ return result ++ ++ def ConsumeBool(self): ++ """Consumes a boolean value. ++ ++ Returns: ++ The bool parsed. ++ ++ Raises: ++ ParseError: If a boolean value couldn't be consumed. ++ """ ++ try: ++ result = ParseBool(self.token) ++ except ValueError as e: ++ raise self._ParseError(str(e)) ++ self.NextToken() ++ return result ++ ++ def ConsumeString(self): ++ """Consumes a string value. ++ ++ Returns: ++ The string parsed. ++ ++ Raises: ++ ParseError: If a string value couldn't be consumed. ++ """ ++ the_bytes = self.ConsumeByteString() ++ try: ++ return six.text_type(the_bytes, 'utf-8') ++ except UnicodeDecodeError as e: ++ raise self._StringParseError(e) ++ ++ def ConsumeByteString(self): ++ """Consumes a byte array value. ++ ++ Returns: ++ The array parsed (as a string). ++ ++ Raises: ++ ParseError: If a byte array value couldn't be consumed. ++ """ ++ the_list = [self._ConsumeSingleByteString()] ++ while self.token and self.token[0] in ('\'', '"'): ++ the_list.append(self._ConsumeSingleByteString()) ++ return b''.join(the_list) ++ ++ def _ConsumeSingleByteString(self): ++ """Consume one token of a string literal. ++ ++ String literals (whether bytes or text) can come in multiple adjacent ++ tokens which are automatically concatenated, like in C or Python. This ++ method only consumes one token. 
++ """ ++ text = self.token ++ if len(text) < 1 or text[0] not in ('\'', '"'): ++ raise self._ParseError('Expected string.') ++ ++ if len(text) < 2 or text[-1] != text[0]: ++ raise self._ParseError('String missing ending quote.') ++ ++ try: ++ result = text_encoding.CUnescape(text[1:-1]) ++ except ValueError as e: ++ raise self._ParseError(str(e)) ++ self.NextToken() ++ return result ++ ++ def ConsumeEnum(self, field): ++ try: ++ result = ParseEnum(field, self.token) ++ except ValueError as e: ++ raise self._ParseError(str(e)) ++ self.NextToken() ++ return result ++ ++ def ParseErrorPreviousToken(self, message): ++ """Creates and *returns* a ParseError for the previously read token. ++ ++ Args: ++ message: A message to set for the exception. ++ ++ Returns: ++ A ParseError instance. ++ """ ++ return ParseError('%d:%d : %s' % ( ++ self._previous_line + 1, self._previous_column + 1, message)) ++ ++ def _ParseError(self, message): ++ """Creates and *returns* a ParseError for the current token.""" ++ return ParseError('%d:%d : %s' % ( ++ self._line + 1, self._column + 1, message)) ++ ++ def _StringParseError(self, e): ++ return self._ParseError('Couldn\'t parse string: ' + str(e)) ++ ++ def NextToken(self): ++ """Reads the next meaningful token.""" ++ self._previous_line = self._line ++ self._previous_column = self._column ++ ++ self._column += len(self.token) ++ self._SkipWhitespace() ++ ++ if not self._more_lines: ++ self.token = '' ++ return ++ ++ match = self._TOKEN.match(self._current_line, self._column) ++ if match: ++ token = match.group(0) ++ self.token = token ++ else: ++ self.token = self._current_line[self._column] ++ ++ ++def ParseInteger(text, is_signed=False, is_long=False): ++ """Parses an integer. ++ ++ Args: ++ text: The text to parse. ++ is_signed: True if a signed integer must be parsed. ++ is_long: True if a long integer must be parsed. ++ ++ Returns: ++ The integer value. ++ ++ Raises: ++ ValueError: Thrown Iff the text is not a valid integer. 
++ """ ++ # Do the actual parsing. Exception handling is propagated to caller. ++ try: ++ # We force 32-bit values to int and 64-bit values to long to make ++ # alternate implementations where the distinction is more significant ++ # (e.g. the C++ implementation) simpler. ++ if is_long: ++ result = long(text, 0) ++ else: ++ result = int(text, 0) ++ except ValueError: ++ raise ValueError('Couldn\'t parse integer: %s' % text) ++ ++ # Check if the integer is sane. Exceptions handled by callers. ++ checker = _INTEGER_CHECKERS[2 * int(is_long) + int(is_signed)] ++ checker.CheckValue(result) ++ return result ++ ++ ++def ParseFloat(text): ++ """Parse a floating point number. ++ ++ Args: ++ text: Text to parse. ++ ++ Returns: ++ The number parsed. ++ ++ Raises: ++ ValueError: If a floating point number couldn't be parsed. ++ """ ++ try: ++ # Assume Python compatible syntax. ++ return float(text) ++ except ValueError: ++ # Check alternative spellings. ++ if _FLOAT_INFINITY.match(text): ++ if text[0] == '-': ++ return float('-inf') ++ else: ++ return float('inf') ++ elif _FLOAT_NAN.match(text): ++ return float('nan') ++ else: ++ # assume '1.0f' format ++ try: ++ return float(text.rstrip('f')) ++ except ValueError: ++ raise ValueError('Couldn\'t parse float: %s' % text) ++ ++ ++def ParseBool(text): ++ """Parse a boolean value. ++ ++ Args: ++ text: Text to parse. ++ ++ Returns: ++ Boolean values parsed ++ ++ Raises: ++ ValueError: If text is not a valid boolean. ++ """ ++ if text in ('true', 't', '1'): ++ return True ++ elif text in ('false', 'f', '0'): ++ return False ++ else: ++ raise ValueError('Expected "true" or "false".') ++ ++ ++def ParseEnum(field, value): ++ """Parse an enum value. ++ ++ The value can be specified by a number (the enum value), or by ++ a string literal (the enum name). ++ ++ Args: ++ field: Enum field descriptor. ++ value: String value. ++ ++ Returns: ++ Enum value number. ++ ++ Raises: ++ ValueError: If the enum value could not be parsed. 
++ """ ++ enum_descriptor = field.enum_type ++ try: ++ number = int(value, 0) ++ except ValueError: ++ # Identifier. ++ enum_value = enum_descriptor.values_by_name.get(value, None) ++ if enum_value is None: ++ raise ValueError( ++ 'Enum type "%s" has no value named %s.' % ( ++ enum_descriptor.full_name, value)) ++ else: ++ # Numeric value. ++ enum_value = enum_descriptor.values_by_number.get(number, None) ++ if enum_value is None: ++ raise ValueError( ++ 'Enum type "%s" has no value with number %d.' % ( ++ enum_descriptor.full_name, number)) ++ return enum_value.number +--- /dev/null ++++ protobuf-2.6.1/python3/mox.py +@@ -0,0 +1,1401 @@ ++#!/usr/bin/python2.4 ++# ++# Copyright 2008 Google Inc. ++# ++# Licensed under the Apache License, Version 2.0 (the "License"); ++# you may not use this file except in compliance with the License. ++# You may obtain a copy of the License at ++# ++# http://www.apache.org/licenses/LICENSE-2.0 ++# ++# Unless required by applicable law or agreed to in writing, software ++# distributed under the License is distributed on an "AS IS" BASIS, ++# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. ++# See the License for the specific language governing permissions and ++# limitations under the License. ++ ++# This file is used for testing. The original is at: ++# http://code.google.com/p/pymox/ ++ ++"""Mox, an object-mocking framework for Python. ++ ++Mox works in the record-replay-verify paradigm. When you first create ++a mock object, it is in record mode. You then programmatically set ++the expected behavior of the mock object (what methods are to be ++called on it, with what parameters, what they should return, and in ++what order). ++ ++Once you have set up the expected mock behavior, you put it in replay ++mode. Now the mock responds to method calls just as you told it to. ++If an unexpected method (or an expected method with unexpected ++parameters) is called, then an exception will be raised. 
++ ++Once you are done interacting with the mock, you need to verify that ++all the expected interactions occured. (Maybe your code exited ++prematurely without calling some cleanup method!) The verify phase ++ensures that every expected method was called; otherwise, an exception ++will be raised. ++ ++Suggested usage / workflow: ++ ++ # Create Mox factory ++ my_mox = Mox() ++ ++ # Create a mock data access object ++ mock_dao = my_mox.CreateMock(DAOClass) ++ ++ # Set up expected behavior ++ mock_dao.RetrievePersonWithIdentifier('1').AndReturn(person) ++ mock_dao.DeletePerson(person) ++ ++ # Put mocks in replay mode ++ my_mox.ReplayAll() ++ ++ # Inject mock object and run test ++ controller.SetDao(mock_dao) ++ controller.DeletePersonById('1') ++ ++ # Verify all methods were called as expected ++ my_mox.VerifyAll() ++""" ++ ++from collections import deque ++import re ++import types ++import unittest ++ ++import stubout ++ ++class Error(AssertionError): ++ """Base exception for this module.""" ++ ++ pass ++ ++ ++class ExpectedMethodCallsError(Error): ++ """Raised when Verify() is called before all expected methods have been called ++ """ ++ ++ def __init__(self, expected_methods): ++ """Init exception. ++ ++ Args: ++ # expected_methods: A sequence of MockMethod objects that should have been ++ # called. ++ expected_methods: [MockMethod] ++ ++ Raises: ++ ValueError: if expected_methods contains no methods. ++ """ ++ ++ if not expected_methods: ++ raise ValueError("There must be at least one expected method") ++ Error.__init__(self) ++ self._expected_methods = expected_methods ++ ++ def __str__(self): ++ calls = "\n".join(["%3d. %s" % (i, m) ++ for i, m in enumerate(self._expected_methods)]) ++ return "Verify: Expected methods never called:\n%s" % (calls,) ++ ++ ++class UnexpectedMethodCallError(Error): ++ """Raised when an unexpected method is called. ++ ++ This can occur if a method is called with incorrect parameters, or out of the ++ specified order. 
++ """ ++ ++ def __init__(self, unexpected_method, expected): ++ """Init exception. ++ ++ Args: ++ # unexpected_method: MockMethod that was called but was not at the head of ++ # the expected_method queue. ++ # expected: MockMethod or UnorderedGroup the method should have ++ # been in. ++ unexpected_method: MockMethod ++ expected: MockMethod or UnorderedGroup ++ """ ++ ++ Error.__init__(self) ++ self._unexpected_method = unexpected_method ++ self._expected = expected ++ ++ def __str__(self): ++ return "Unexpected method call: %s. Expecting: %s" % \ ++ (self._unexpected_method, self._expected) ++ ++ ++class UnknownMethodCallError(Error): ++ """Raised if an unknown method is requested of the mock object.""" ++ ++ def __init__(self, unknown_method_name): ++ """Init exception. ++ ++ Args: ++ # unknown_method_name: Method call that is not part of the mocked class's ++ # public interface. ++ unknown_method_name: str ++ """ ++ ++ Error.__init__(self) ++ self._unknown_method_name = unknown_method_name ++ ++ def __str__(self): ++ return "Method called is not a member of the object: %s" % \ ++ self._unknown_method_name ++ ++ ++class Mox(object): ++ """Mox: a factory for creating mock objects.""" ++ ++ # A list of types that should be stubbed out with MockObjects (as ++ # opposed to MockAnythings). ++ _USE_MOCK_OBJECT = [types.ClassType, types.InstanceType, types.ModuleType, ++ types.ObjectType, types.TypeType] ++ ++ def __init__(self): ++ """Initialize a new Mox.""" ++ ++ self._mock_objects = [] ++ self.stubs = stubout.StubOutForTesting() ++ ++ def CreateMock(self, class_to_mock): ++ """Create a new mock object. ++ ++ Args: ++ # class_to_mock: the class to be mocked ++ class_to_mock: class ++ ++ Returns: ++ MockObject that can be used as the class_to_mock would be. ++ """ ++ ++ new_mock = MockObject(class_to_mock) ++ self._mock_objects.append(new_mock) ++ return new_mock ++ ++ def CreateMockAnything(self): ++ """Create a mock that will accept any method calls. 
++ ++ This does not enforce an interface. ++ """ ++ ++ new_mock = MockAnything() ++ self._mock_objects.append(new_mock) ++ return new_mock ++ ++ def ReplayAll(self): ++ """Set all mock objects to replay mode.""" ++ ++ for mock_obj in self._mock_objects: ++ mock_obj._Replay() ++ ++ ++ def VerifyAll(self): ++ """Call verify on all mock objects created.""" ++ ++ for mock_obj in self._mock_objects: ++ mock_obj._Verify() ++ ++ def ResetAll(self): ++ """Call reset on all mock objects. This does not unset stubs.""" ++ ++ for mock_obj in self._mock_objects: ++ mock_obj._Reset() ++ ++ def StubOutWithMock(self, obj, attr_name, use_mock_anything=False): ++ """Replace a method, attribute, etc. with a Mock. ++ ++ This will replace a class or module with a MockObject, and everything else ++ (method, function, etc) with a MockAnything. This can be overridden to ++ always use a MockAnything by setting use_mock_anything to True. ++ ++ Args: ++ obj: A Python object (class, module, instance, callable). ++ attr_name: str. The name of the attribute to replace with a mock. ++ use_mock_anything: bool. True if a MockAnything should be used regardless ++ of the type of attribute. ++ """ ++ ++ attr_to_replace = getattr(obj, attr_name) ++ if type(attr_to_replace) in self._USE_MOCK_OBJECT and not use_mock_anything: ++ stub = self.CreateMock(attr_to_replace) ++ else: ++ stub = self.CreateMockAnything() ++ ++ self.stubs.Set(obj, attr_name, stub) ++ ++ def UnsetStubs(self): ++ """Restore stubs to their original state.""" ++ ++ self.stubs.UnsetAll() ++ ++def Replay(*args): ++ """Put mocks into Replay mode. ++ ++ Args: ++ # args is any number of mocks to put into replay mode. ++ """ ++ ++ for mock in args: ++ mock._Replay() ++ ++ ++def Verify(*args): ++ """Verify mocks. ++ ++ Args: ++ # args is any number of mocks to be verified. ++ """ ++ ++ for mock in args: ++ mock._Verify() ++ ++ ++def Reset(*args): ++ """Reset mocks. ++ ++ Args: ++ # args is any number of mocks to be reset. 
++ """ ++ ++ for mock in args: ++ mock._Reset() ++ ++ ++class MockAnything: ++ """A mock that can be used to mock anything. ++ ++ This is helpful for mocking classes that do not provide a public interface. ++ """ ++ ++ def __init__(self): ++ """ """ ++ self._Reset() ++ ++ def __getattr__(self, method_name): ++ """Intercept method calls on this object. ++ ++ A new MockMethod is returned that is aware of the MockAnything's ++ state (record or replay). The call will be recorded or replayed ++ by the MockMethod's __call__. ++ ++ Args: ++ # method name: the name of the method being called. ++ method_name: str ++ ++ Returns: ++ A new MockMethod aware of MockAnything's state (record or replay). ++ """ ++ ++ return self._CreateMockMethod(method_name) ++ ++ def _CreateMockMethod(self, method_name): ++ """Create a new mock method call and return it. ++ ++ Args: ++ # method name: the name of the method being called. ++ method_name: str ++ ++ Returns: ++ A new MockMethod aware of MockAnything's state (record or replay). ++ """ ++ ++ return MockMethod(method_name, self._expected_calls_queue, ++ self._replay_mode) ++ ++ def __nonzero__(self): ++ """Return 1 for nonzero so the mock can be used as a conditional.""" ++ ++ return 1 ++ ++ def __eq__(self, rhs): ++ """Provide custom logic to compare objects.""" ++ ++ return (isinstance(rhs, MockAnything) and ++ self._replay_mode == rhs._replay_mode and ++ self._expected_calls_queue == rhs._expected_calls_queue) ++ ++ def __ne__(self, rhs): ++ """Provide custom logic to compare objects.""" ++ ++ return not self == rhs ++ ++ def _Replay(self): ++ """Start replaying expected method calls.""" ++ ++ self._replay_mode = True ++ ++ def _Verify(self): ++ """Verify that all of the expected calls have been made. ++ ++ Raises: ++ ExpectedMethodCallsError: if there are still more method calls in the ++ expected queue. 
++ """ ++ ++ # If the list of expected calls is not empty, raise an exception ++ if self._expected_calls_queue: ++ # The last MultipleTimesGroup is not popped from the queue. ++ if (len(self._expected_calls_queue) == 1 and ++ isinstance(self._expected_calls_queue[0], MultipleTimesGroup) and ++ self._expected_calls_queue[0].IsSatisfied()): ++ pass ++ else: ++ raise ExpectedMethodCallsError(self._expected_calls_queue) ++ ++ def _Reset(self): ++ """Reset the state of this mock to record mode with an empty queue.""" ++ ++ # Maintain a list of method calls we are expecting ++ self._expected_calls_queue = deque() ++ ++ # Make sure we are in setup mode, not replay mode ++ self._replay_mode = False ++ ++ ++class MockObject(MockAnything, object): ++ """A mock object that simulates the public/protected interface of a class.""" ++ ++ def __init__(self, class_to_mock): ++ """Initialize a mock object. ++ ++ This determines the methods and properties of the class and stores them. ++ ++ Args: ++ # class_to_mock: class to be mocked ++ class_to_mock: class ++ """ ++ ++ # This is used to hack around the mixin/inheritance of MockAnything, which ++ # is not a proper object (it can be anything. :-) ++ MockAnything.__dict__['__init__'](self) ++ ++ # Get a list of all the public and special methods we should mock. ++ self._known_methods = set() ++ self._known_vars = set() ++ self._class_to_mock = class_to_mock ++ for method in dir(class_to_mock): ++ if callable(getattr(class_to_mock, method)): ++ self._known_methods.add(method) ++ else: ++ self._known_vars.add(method) ++ ++ def __getattr__(self, name): ++ """Intercept attribute request on this object. ++ ++ If the attribute is a public class variable, it will be returned and not ++ recorded as a call. ++ ++ If the attribute is not a variable, it is handled like a method ++ call. 
The method name is checked against the set of mockable ++ methods, and a new MockMethod is returned that is aware of the ++ MockObject's state (record or replay). The call will be recorded ++ or replayed by the MockMethod's __call__. ++ ++ Args: ++ # name: the name of the attribute being requested. ++ name: str ++ ++ Returns: ++ Either a class variable or a new MockMethod that is aware of the state ++ of the mock (record or replay). ++ ++ Raises: ++ UnknownMethodCallError if the MockObject does not mock the requested ++ method. ++ """ ++ ++ if name in self._known_vars: ++ return getattr(self._class_to_mock, name) ++ ++ if name in self._known_methods: ++ return self._CreateMockMethod(name) ++ ++ raise UnknownMethodCallError(name) ++ ++ def __eq__(self, rhs): ++ """Provide custom logic to compare objects.""" ++ ++ return (isinstance(rhs, MockObject) and ++ self._class_to_mock == rhs._class_to_mock and ++ self._replay_mode == rhs._replay_mode and ++ self._expected_calls_queue == rhs._expected_calls_queue) ++ ++ def __setitem__(self, key, value): ++ """Provide custom logic for mocking classes that support item assignment. ++ ++ Args: ++ key: Key to set the value for. ++ value: Value to set. ++ ++ Returns: ++ Expected return value in replay mode. A MockMethod object for the ++ __setitem__ method that has already been called if not in replay mode. ++ ++ Raises: ++ TypeError if the underlying class does not support item assignment. ++ UnexpectedMethodCallError if the object does not expect the call to ++ __setitem__. ++ ++ """ ++ setitem = self._class_to_mock.__dict__.get('__setitem__', None) ++ ++ # Verify the class supports item assignment. ++ if setitem is None: ++ raise TypeError('object does not support item assignment') ++ ++ # If we are in replay mode then simply call the mock __setitem__ method. 
++ if self._replay_mode: ++ return MockMethod('__setitem__', self._expected_calls_queue, ++ self._replay_mode)(key, value) ++ ++ ++ # Otherwise, create a mock method __setitem__. ++ return self._CreateMockMethod('__setitem__')(key, value) ++ ++ def __getitem__(self, key): ++ """Provide custom logic for mocking classes that are subscriptable. ++ ++ Args: ++ key: Key to return the value for. ++ ++ Returns: ++ Expected return value in replay mode. A MockMethod object for the ++ __getitem__ method that has already been called if not in replay mode. ++ ++ Raises: ++ TypeError if the underlying class is not subscriptable. ++ UnexpectedMethodCallError if the object does not expect the call to ++ __setitem__. ++ ++ """ ++ getitem = self._class_to_mock.__dict__.get('__getitem__', None) ++ ++ # Verify the class supports item assignment. ++ if getitem is None: ++ raise TypeError('unsubscriptable object') ++ ++ # If we are in replay mode then simply call the mock __getitem__ method. ++ if self._replay_mode: ++ return MockMethod('__getitem__', self._expected_calls_queue, ++ self._replay_mode)(key) ++ ++ ++ # Otherwise, create a mock method __getitem__. ++ return self._CreateMockMethod('__getitem__')(key) ++ ++ def __call__(self, *params, **named_params): ++ """Provide custom logic for mocking classes that are callable.""" ++ ++ # Verify the class we are mocking is callable ++ callable = self._class_to_mock.__dict__.get('__call__', None) ++ if callable is None: ++ raise TypeError('Not callable') ++ ++ # Because the call is happening directly on this object instead of a method, ++ # the call on the mock method is made right here ++ mock_method = self._CreateMockMethod('__call__') ++ return mock_method(*params, **named_params) ++ ++ @property ++ def __class__(self): ++ """Return the class that is being mocked.""" ++ ++ return self._class_to_mock ++ ++ ++class MockMethod(object): ++ """Callable mock method. 
++ ++ A MockMethod should act exactly like the method it mocks, accepting parameters ++ and returning a value, or throwing an exception (as specified). When this ++ method is called, it can optionally verify whether the called method (name and ++ signature) matches the expected method. ++ """ ++ ++ def __init__(self, method_name, call_queue, replay_mode): ++ """Construct a new mock method. ++ ++ Args: ++ # method_name: the name of the method ++ # call_queue: deque of calls, verify this call against the head, or add ++ # this call to the queue. ++ # replay_mode: False if we are recording, True if we are verifying calls ++ # against the call queue. ++ method_name: str ++ call_queue: list or deque ++ replay_mode: bool ++ """ ++ ++ self._name = method_name ++ self._call_queue = call_queue ++ if not isinstance(call_queue, deque): ++ self._call_queue = deque(self._call_queue) ++ self._replay_mode = replay_mode ++ ++ self._params = None ++ self._named_params = None ++ self._return_value = None ++ self._exception = None ++ self._side_effects = None ++ ++ def __call__(self, *params, **named_params): ++ """Log parameters and return the specified return value. ++ ++ If the Mock(Anything/Object) associated with this call is in record mode, ++ this MockMethod will be pushed onto the expected call queue. If the mock ++ is in replay mode, this will pop a MockMethod off the top of the queue and ++ verify this call is equal to the expected call. ++ ++ Raises: ++ UnexpectedMethodCall if this call is supposed to match an expected method ++ call and it does not. 
++ """ ++ ++ self._params = params ++ self._named_params = named_params ++ ++ if not self._replay_mode: ++ self._call_queue.append(self) ++ return self ++ ++ expected_method = self._VerifyMethodCall() ++ ++ if expected_method._side_effects: ++ expected_method._side_effects(*params, **named_params) ++ ++ if expected_method._exception: ++ raise expected_method._exception ++ ++ return expected_method._return_value ++ ++ def __getattr__(self, name): ++ """Raise an AttributeError with a helpful message.""" ++ ++ raise AttributeError('MockMethod has no attribute "%s". ' ++ 'Did you remember to put your mocks in replay mode?' % name) ++ ++ def _PopNextMethod(self): ++ """Pop the next method from our call queue.""" ++ try: ++ return self._call_queue.popleft() ++ except IndexError: ++ raise UnexpectedMethodCallError(self, None) ++ ++ def _VerifyMethodCall(self): ++ """Verify the called method is expected. ++ ++ This can be an ordered method, or part of an unordered set. ++ ++ Returns: ++ The expected mock method. ++ ++ Raises: ++ UnexpectedMethodCall if the method called was not expected. ++ """ ++ ++ expected = self._PopNextMethod() ++ ++ # Loop here, because we might have a MethodGroup followed by another ++ # group. ++ while isinstance(expected, MethodGroup): ++ expected, method = expected.MethodCalled(self) ++ if method is not None: ++ return method ++ ++ # This is a mock method, so just check equality. ++ if expected != self: ++ raise UnexpectedMethodCallError(self, expected) ++ ++ return expected ++ ++ def __str__(self): ++ params = ', '.join( ++ [repr(p) for p in self._params or []] + ++ ['%s=%r' % x for x in sorted((self._named_params or {}).items())]) ++ desc = "%s(%s) -> %r" % (self._name, params, self._return_value) ++ return desc ++ ++ def __eq__(self, rhs): ++ """Test whether this MockMethod is equivalent to another MockMethod. 
++ ++ Args: ++ # rhs: the right hand side of the test ++ rhs: MockMethod ++ """ ++ ++ return (isinstance(rhs, MockMethod) and ++ self._name == rhs._name and ++ self._params == rhs._params and ++ self._named_params == rhs._named_params) ++ ++ def __ne__(self, rhs): ++ """Test whether this MockMethod is not equivalent to another MockMethod. ++ ++ Args: ++ # rhs: the right hand side of the test ++ rhs: MockMethod ++ """ ++ ++ return not self == rhs ++ ++ def GetPossibleGroup(self): ++ """Returns a possible group from the end of the call queue or None if no ++ other methods are on the stack. ++ """ ++ ++ # Remove this method from the tail of the queue so we can add it to a group. ++ this_method = self._call_queue.pop() ++ assert this_method == self ++ ++ # Determine if the tail of the queue is a group, or just a regular ordered ++ # mock method. ++ group = None ++ try: ++ group = self._call_queue[-1] ++ except IndexError: ++ pass ++ ++ return group ++ ++ def _CheckAndCreateNewGroup(self, group_name, group_class): ++ """Checks if the last method (a possible group) is an instance of our ++ group_class. Adds the current method to this group or creates a new one. ++ ++ Args: ++ ++ group_name: the name of the group. ++ group_class: the class used to create instance of this new group ++ """ ++ group = self.GetPossibleGroup() ++ ++ # If this is a group, and it is the correct group, add the method. ++ if isinstance(group, group_class) and group.group_name() == group_name: ++ group.AddMethod(self) ++ return self ++ ++ # Create a new group and add the method. ++ new_group = group_class(group_name) ++ new_group.AddMethod(self) ++ self._call_queue.append(new_group) ++ return self ++ ++ def InAnyOrder(self, group_name="default"): ++ """Move this method into a group of unordered calls. ++ ++ A group of unordered calls must be defined together, and must be executed ++ in full before the next expected method can be called. 
There can be ++ multiple groups that are expected serially, if they are given ++ different group names. The same group name can be reused if there is a ++ standard method call, or a group with a different name, spliced between ++ usages. ++ ++ Args: ++ group_name: the name of the unordered group. ++ ++ Returns: ++ self ++ """ ++ return self._CheckAndCreateNewGroup(group_name, UnorderedGroup) ++ ++ def MultipleTimes(self, group_name="default"): ++ """Move this method into group of calls which may be called multiple times. ++ ++ A group of repeating calls must be defined together, and must be executed in ++ full before the next expected mehtod can be called. ++ ++ Args: ++ group_name: the name of the unordered group. ++ ++ Returns: ++ self ++ """ ++ return self._CheckAndCreateNewGroup(group_name, MultipleTimesGroup) ++ ++ def AndReturn(self, return_value): ++ """Set the value to return when this method is called. ++ ++ Args: ++ # return_value can be anything. ++ """ ++ ++ self._return_value = return_value ++ return return_value ++ ++ def AndRaise(self, exception): ++ """Set the exception to raise when this method is called. ++ ++ Args: ++ # exception: the exception to raise when this method is called. ++ exception: Exception ++ """ ++ ++ self._exception = exception ++ ++ def WithSideEffects(self, side_effects): ++ """Set the side effects that are simulated when this method is called. ++ ++ Args: ++ side_effects: A callable which modifies the parameters or other relevant ++ state which a given test case depends on. ++ ++ Returns: ++ Self for chaining with AndReturn and AndRaise. ++ """ ++ self._side_effects = side_effects ++ return self ++ ++class Comparator: ++ """Base class for all Mox comparators. ++ ++ A Comparator can be used as a parameter to a mocked method when the exact ++ value is not known. For example, the code you are testing might build up a ++ long SQL string that is passed to your mock DAO. 
You're only interested that ++ the IN clause contains the proper primary keys, so you can set your mock ++ up as follows: ++ ++ mock_dao.RunQuery(StrContains('IN (1, 2, 4, 5)')).AndReturn(mock_result) ++ ++ Now whatever query is passed in must contain the string 'IN (1, 2, 4, 5)'. ++ ++ A Comparator may replace one or more parameters, for example: ++ # return at most 10 rows ++ mock_dao.RunQuery(StrContains('SELECT'), 10) ++ ++ or ++ ++ # Return some non-deterministic number of rows ++ mock_dao.RunQuery(StrContains('SELECT'), IsA(int)) ++ """ ++ ++ def equals(self, rhs): ++ """Special equals method that all comparators must implement. ++ ++ Args: ++ rhs: any python object ++ """ ++ ++ raise NotImplementedError, 'method must be implemented by a subclass.' ++ ++ def __eq__(self, rhs): ++ return self.equals(rhs) ++ ++ def __ne__(self, rhs): ++ return not self.equals(rhs) ++ ++ ++class IsA(Comparator): ++ """This class wraps a basic Python type or class. It is used to verify ++ that a parameter is of the given type or class. ++ ++ Example: ++ mock_dao.Connect(IsA(DbConnectInfo)) ++ """ ++ ++ def __init__(self, class_name): ++ """Initialize IsA ++ ++ Args: ++ class_name: basic python type or a class ++ """ ++ ++ self._class_name = class_name ++ ++ def equals(self, rhs): ++ """Check to see if the RHS is an instance of class_name. ++ ++ Args: ++ # rhs: the right hand side of the test ++ rhs: object ++ ++ Returns: ++ bool ++ """ ++ ++ try: ++ return isinstance(rhs, self._class_name) ++ except TypeError: ++ # Check raw types if there was a type error. This is helpful for ++ # things like cStringIO.StringIO. ++ return type(rhs) == type(self._class_name) ++ ++ def __repr__(self): ++ return str(self._class_name) ++ ++class IsAlmost(Comparator): ++ """Comparison class used to check whether a parameter is nearly equal ++ to a given value. Generally useful for floating point numbers. 
++ ++ Example mock_dao.SetTimeout((IsAlmost(3.9))) ++ """ ++ ++ def __init__(self, float_value, places=7): ++ """Initialize IsAlmost. ++ ++ Args: ++ float_value: The value for making the comparison. ++ places: The number of decimal places to round to. ++ """ ++ ++ self._float_value = float_value ++ self._places = places ++ ++ def equals(self, rhs): ++ """Check to see if RHS is almost equal to float_value ++ ++ Args: ++ rhs: the value to compare to float_value ++ ++ Returns: ++ bool ++ """ ++ ++ try: ++ return round(rhs-self._float_value, self._places) == 0 ++ except TypeError: ++ # This is probably because either float_value or rhs is not a number. ++ return False ++ ++ def __repr__(self): ++ return str(self._float_value) ++ ++class StrContains(Comparator): ++ """Comparison class used to check whether a substring exists in a ++ string parameter. This can be useful in mocking a database with SQL ++ passed in as a string parameter, for example. ++ ++ Example: ++ mock_dao.RunQuery(StrContains('IN (1, 2, 4, 5)')).AndReturn(mock_result) ++ """ ++ ++ def __init__(self, search_string): ++ """Initialize. ++ ++ Args: ++ # search_string: the string you are searching for ++ search_string: str ++ """ ++ ++ self._search_string = search_string ++ ++ def equals(self, rhs): ++ """Check to see if the search_string is contained in the rhs string. ++ ++ Args: ++ # rhs: the right hand side of the test ++ rhs: object ++ ++ Returns: ++ bool ++ """ ++ ++ try: ++ return rhs.find(self._search_string) > -1 ++ except Exception: ++ return False ++ ++ def __repr__(self): ++ return '' % self._search_string ++ ++ ++class Regex(Comparator): ++ """Checks if a string matches a regular expression. ++ ++ This uses a given regular expression to determine equality. ++ """ ++ ++ def __init__(self, pattern, flags=0): ++ """Initialize. 
++ ++ Args: ++ # pattern is the regular expression to search for ++ pattern: str ++ # flags passed to re.compile function as the second argument ++ flags: int ++ """ ++ ++ self.regex = re.compile(pattern, flags=flags) ++ ++ def equals(self, rhs): ++ """Check to see if rhs matches regular expression pattern. ++ ++ Returns: ++ bool ++ """ ++ ++ return self.regex.search(rhs) is not None ++ ++ def __repr__(self): ++ s = '' % self._key ++ ++ ++class ContainsKeyValue(Comparator): ++ """Checks whether a key/value pair is in a dict parameter. ++ ++ Example: ++ mock_dao.UpdateUsers(ContainsKeyValue('stevepm', stevepm_user_info)) ++ """ ++ ++ def __init__(self, key, value): ++ """Initialize. ++ ++ Args: ++ # key: a key in a dict ++ # value: the corresponding value ++ """ ++ ++ self._key = key ++ self._value = value ++ ++ def equals(self, rhs): ++ """Check whether the given key/value pair is in the rhs dict. ++ ++ Returns: ++ bool ++ """ ++ ++ try: ++ return rhs[self._key] == self._value ++ except Exception: ++ return False ++ ++ def __repr__(self): ++ return '' % (self._key, self._value) ++ ++ ++class SameElementsAs(Comparator): ++ """Checks whether iterables contain the same elements (ignoring order). ++ ++ Example: ++ mock_dao.ProcessUsers(SameElementsAs('stevepm', 'salomaki')) ++ """ ++ ++ def __init__(self, expected_seq): ++ """Initialize. ++ ++ Args: ++ expected_seq: a sequence ++ """ ++ ++ self._expected_seq = expected_seq ++ ++ def equals(self, actual_seq): ++ """Check to see whether actual_seq has same elements as expected_seq. ++ ++ Args: ++ actual_seq: sequence ++ ++ Returns: ++ bool ++ """ ++ ++ try: ++ expected = dict([(element, None) for element in self._expected_seq]) ++ actual = dict([(element, None) for element in actual_seq]) ++ except TypeError: ++ # Fall back to slower list-compare if any of the objects are unhashable. 
++ expected = list(self._expected_seq) ++ actual = list(actual_seq) ++ expected.sort() ++ actual.sort() ++ return expected == actual ++ ++ def __repr__(self): ++ return '' % self._expected_seq ++ ++ ++class And(Comparator): ++ """Evaluates one or more Comparators on RHS and returns an AND of the results. ++ """ ++ ++ def __init__(self, *args): ++ """Initialize. ++ ++ Args: ++ *args: One or more Comparator ++ """ ++ ++ self._comparators = args ++ ++ def equals(self, rhs): ++ """Checks whether all Comparators are equal to rhs. ++ ++ Args: ++ # rhs: can be anything ++ ++ Returns: ++ bool ++ """ ++ ++ for comparator in self._comparators: ++ if not comparator.equals(rhs): ++ return False ++ ++ return True ++ ++ def __repr__(self): ++ return '' % str(self._comparators) ++ ++ ++class Or(Comparator): ++ """Evaluates one or more Comparators on RHS and returns an OR of the results. ++ """ ++ ++ def __init__(self, *args): ++ """Initialize. ++ ++ Args: ++ *args: One or more Mox comparators ++ """ ++ ++ self._comparators = args ++ ++ def equals(self, rhs): ++ """Checks whether any Comparator is equal to rhs. ++ ++ Args: ++ # rhs: can be anything ++ ++ Returns: ++ bool ++ """ ++ ++ for comparator in self._comparators: ++ if comparator.equals(rhs): ++ return True ++ ++ return False ++ ++ def __repr__(self): ++ return '' % str(self._comparators) ++ ++ ++class Func(Comparator): ++ """Call a function that should verify the parameter passed in is correct. ++ ++ You may need the ability to perform more advanced operations on the parameter ++ in order to validate it. You can use this to have a callable validate any ++ parameter. The callable should return either True or False. ++ ++ ++ Example: ++ ++ def myParamValidator(param): ++ # Advanced logic here ++ return True ++ ++ mock_dao.DoSomething(Func(myParamValidator), true) ++ """ ++ ++ def __init__(self, func): ++ """Initialize. 
++ ++ Args: ++ func: callable that takes one parameter and returns a bool ++ """ ++ ++ self._func = func ++ ++ def equals(self, rhs): ++ """Test whether rhs passes the function test. ++ ++ rhs is passed into func. ++ ++ Args: ++ rhs: any python object ++ ++ Returns: ++ the result of func(rhs) ++ """ ++ ++ return self._func(rhs) ++ ++ def __repr__(self): ++ return str(self._func) ++ ++ ++class IgnoreArg(Comparator): ++ """Ignore an argument. ++ ++ This can be used when we don't care about an argument of a method call. ++ ++ Example: ++ # Check if CastMagic is called with 3 as first arg and 'disappear' as third. ++ mymock.CastMagic(3, IgnoreArg(), 'disappear') ++ """ ++ ++ def equals(self, unused_rhs): ++ """Ignores arguments and returns True. ++ ++ Args: ++ unused_rhs: any python object ++ ++ Returns: ++ always returns True ++ """ ++ ++ return True ++ ++ def __repr__(self): ++ return '' ++ ++ ++class MethodGroup(object): ++ """Base class containing common behaviour for MethodGroups.""" ++ ++ def __init__(self, group_name): ++ self._group_name = group_name ++ ++ def group_name(self): ++ return self._group_name ++ ++ def __str__(self): ++ return '<%s "%s">' % (self.__class__.__name__, self._group_name) ++ ++ def AddMethod(self, mock_method): ++ raise NotImplementedError ++ ++ def MethodCalled(self, mock_method): ++ raise NotImplementedError ++ ++ def IsSatisfied(self): ++ raise NotImplementedError ++ ++class UnorderedGroup(MethodGroup): ++ """UnorderedGroup holds a set of method calls that may occur in any order. ++ ++ This construct is helpful for non-deterministic events, such as iterating ++ over the keys of a dict. ++ """ ++ ++ def __init__(self, group_name): ++ super(UnorderedGroup, self).__init__(group_name) ++ self._methods = [] ++ ++ def AddMethod(self, mock_method): ++ """Add a method to this group. ++ ++ Args: ++ mock_method: A mock method to be added to this group. 
++ """ ++ ++ self._methods.append(mock_method) ++ ++ def MethodCalled(self, mock_method): ++ """Remove a method call from the group. ++ ++ If the method is not in the set, an UnexpectedMethodCallError will be ++ raised. ++ ++ Args: ++ mock_method: a mock method that should be equal to a method in the group. ++ ++ Returns: ++ The mock method from the group ++ ++ Raises: ++ UnexpectedMethodCallError if the mock_method was not in the group. ++ """ ++ ++ # Check to see if this method exists, and if so, remove it from the set ++ # and return it. ++ for method in self._methods: ++ if method == mock_method: ++ # Remove the called mock_method instead of the method in the group. ++ # The called method will match any comparators when equality is checked ++ # during removal. The method in the group could pass a comparator to ++ # another comparator during the equality check. ++ self._methods.remove(mock_method) ++ ++ # If this group is not empty, put it back at the head of the queue. ++ if not self.IsSatisfied(): ++ mock_method._call_queue.appendleft(self) ++ ++ return self, method ++ ++ raise UnexpectedMethodCallError(mock_method, self) ++ ++ def IsSatisfied(self): ++ """Return True if there are not any methods in this group.""" ++ ++ return len(self._methods) == 0 ++ ++ ++class MultipleTimesGroup(MethodGroup): ++ """MultipleTimesGroup holds methods that may be called any number of times. ++ ++ Note: Each method must be called at least once. ++ ++ This is helpful, if you don't know or care how many times a method is called. ++ """ ++ ++ def __init__(self, group_name): ++ super(MultipleTimesGroup, self).__init__(group_name) ++ self._methods = set() ++ self._methods_called = set() ++ ++ def AddMethod(self, mock_method): ++ """Add a method to this group. ++ ++ Args: ++ mock_method: A mock method to be added to this group. ++ """ ++ ++ self._methods.add(mock_method) ++ ++ def MethodCalled(self, mock_method): ++ """Remove a method call from the group. 
++ ++ If the method is not in the set, an UnexpectedMethodCallError will be ++ raised. ++ ++ Args: ++ mock_method: a mock method that should be equal to a method in the group. ++ ++ Returns: ++ The mock method from the group ++ ++ Raises: ++ UnexpectedMethodCallError if the mock_method was not in the group. ++ """ ++ ++ # Check to see if this method exists, and if so add it to the set of ++ # called methods. ++ ++ for method in self._methods: ++ if method == mock_method: ++ self._methods_called.add(mock_method) ++ # Always put this group back on top of the queue, because we don't know ++ # when we are done. ++ mock_method._call_queue.appendleft(self) ++ return self, method ++ ++ if self.IsSatisfied(): ++ next_method = mock_method._PopNextMethod(); ++ return next_method, None ++ else: ++ raise UnexpectedMethodCallError(mock_method, self) ++ ++ def IsSatisfied(self): ++ """Return True if all methods in this group are called at least once.""" ++ # NOTE(psycho): We can't use the simple set difference here because we want ++ # to match different parameters which are considered the same e.g. IsA(str) ++ # and some string. This solution is O(n^2) but n should be small. ++ tmp = self._methods.copy() ++ for called in self._methods_called: ++ for expected in tmp: ++ if called == expected: ++ tmp.remove(expected) ++ if not tmp: ++ return True ++ break ++ return False ++ ++ ++class MoxMetaTestBase(type): ++ """Metaclass to add mox cleanup and verification to every test. ++ ++ As the mox unit testing class is being constructed (MoxTestBase or a ++ subclass), this metaclass will modify all test functions to call the ++ CleanUpMox method of the test class after they finish. This means that ++ unstubbing and verifying will happen for every test with no additional code, ++ and any failures will result in test failures as opposed to errors. 
++ """ ++ ++ def __init__(cls, name, bases, d): ++ type.__init__(cls, name, bases, d) ++ ++ # also get all the attributes from the base classes to account ++ # for a case when test class is not the immediate child of MoxTestBase ++ for base in bases: ++ for attr_name in dir(base): ++ d[attr_name] = getattr(base, attr_name) ++ ++ for func_name, func in d.items(): ++ if func_name.startswith('test') and callable(func): ++ setattr(cls, func_name, MoxMetaTestBase.CleanUpTest(cls, func)) ++ ++ @staticmethod ++ def CleanUpTest(cls, func): ++ """Adds Mox cleanup code to any MoxTestBase method. ++ ++ Always unsets stubs after a test. Will verify all mocks for tests that ++ otherwise pass. ++ ++ Args: ++ cls: MoxTestBase or subclass; the class whose test method we are altering. ++ func: method; the method of the MoxTestBase test class we wish to alter. ++ ++ Returns: ++ The modified method. ++ """ ++ def new_method(self, *args, **kwargs): ++ mox_obj = getattr(self, 'mox', None) ++ cleanup_mox = False ++ if mox_obj and isinstance(mox_obj, Mox): ++ cleanup_mox = True ++ try: ++ func(self, *args, **kwargs) ++ finally: ++ if cleanup_mox: ++ mox_obj.UnsetStubs() ++ if cleanup_mox: ++ mox_obj.VerifyAll() ++ new_method.__name__ = func.__name__ ++ new_method.__doc__ = func.__doc__ ++ new_method.__module__ = func.__module__ ++ return new_method ++ ++ ++class MoxTestBase(unittest.TestCase): ++ """Convenience test class to make stubbing easier. ++ ++ Sets up a "mox" attribute which is an instance of Mox - any mox tests will ++ want this. Also automatically unsets any stubs and verifies that all mock ++ methods have been called at the end of each test, eliminating boilerplate ++ code. ++ """ ++ ++ __metaclass__ = MoxMetaTestBase ++ ++ def setUp(self): ++ self.mox = Mox() +--- /dev/null ++++ protobuf-2.6.1/python3/setup.py +@@ -0,0 +1,191 @@ ++#! /usr/bin/python ++# ++# See README for usage instructions. 
++import sys ++import os ++import subprocess ++ ++# We must use setuptools, not distutils, because we need to use the ++# namespace_packages option for the "google" package. ++from setuptools import setup, Extension ++ ++from distutils.command.clean import clean as _clean ++from distutils.command.build_py import build_py as _build_py ++from distutils.spawn import find_executable ++ ++maintainer_email = "protobuf@googlegroups.com" ++ ++# Find the Protocol Compiler. ++if 'PROTOC' in os.environ and os.path.exists(os.environ['PROTOC']): ++ protoc = os.environ['PROTOC'] ++elif os.path.exists("../src/protoc"): ++ protoc = "../src/protoc" ++elif os.path.exists("../src/protoc.exe"): ++ protoc = "../src/protoc.exe" ++elif os.path.exists("../vsprojects/Debug/protoc.exe"): ++ protoc = "../vsprojects/Debug/protoc.exe" ++elif os.path.exists("../vsprojects/Release/protoc.exe"): ++ protoc = "../vsprojects/Release/protoc.exe" ++else: ++ protoc = find_executable("protoc") ++ ++def generate_proto(source): ++ """Invokes the Protocol Compiler to generate a _pb2.py from the given ++ .proto file. Does nothing if the output already exists and is newer than ++ the input.""" ++ ++ output = source.replace(".proto", "_pb2.py").replace("../src/", "") ++ ++ if (not os.path.exists(output) or ++ (os.path.exists(source) and ++ os.path.getmtime(source) > os.path.getmtime(output))): ++ print ("Generating %s..." % output) ++ ++ if not os.path.exists(source): ++ sys.stderr.write("Can't find required file: %s\n" % source) ++ sys.exit(-1) ++ ++ if protoc == None: ++ sys.stderr.write( ++ "protoc is not installed nor found in ../src. 
Please compile it " ++ "or install the binary package.\n") ++ sys.exit(-1) ++ ++ protoc_command = [ protoc, "-I../src", "-I.", "--python_out=.", source ] ++ if subprocess.call(protoc_command) != 0: ++ sys.exit(-1) ++ ++def GenerateUnittestProtos(): ++ generate_proto("../src/google/protobuf/unittest.proto") ++ generate_proto("../src/google/protobuf/unittest_custom_options.proto") ++ generate_proto("../src/google/protobuf/unittest_import.proto") ++ generate_proto("../src/google/protobuf/unittest_import_public.proto") ++ generate_proto("../src/google/protobuf/unittest_mset.proto") ++ generate_proto("../src/google/protobuf/unittest_no_generic_services.proto") ++ generate_proto("google/protobuf/internal/descriptor_pool_test1.proto") ++ generate_proto("google/protobuf/internal/descriptor_pool_test2.proto") ++ generate_proto("google/protobuf/internal/test_bad_identifiers.proto") ++ generate_proto("google/protobuf/internal/missing_enum_values.proto") ++ generate_proto("google/protobuf/internal/more_extensions.proto") ++ generate_proto("google/protobuf/internal/more_extensions_dynamic.proto") ++ generate_proto("google/protobuf/internal/more_messages.proto") ++ generate_proto("google/protobuf/internal/factory_test1.proto") ++ generate_proto("google/protobuf/internal/factory_test2.proto") ++ generate_proto("google/protobuf/pyext/python.proto") ++ ++def MakeTestSuite(): ++ # Test C++ implementation ++ import unittest ++ import google.protobuf.pyext.descriptor_cpp2_test as descriptor_cpp2_test ++ import google.protobuf.pyext.message_factory_cpp2_test \ ++ as message_factory_cpp2_test ++ import google.protobuf.pyext.reflection_cpp2_generated_test \ ++ as reflection_cpp2_generated_test ++ ++ loader = unittest.defaultTestLoader ++ suite = unittest.TestSuite() ++ for test in [ descriptor_cpp2_test, ++ message_factory_cpp2_test, ++ reflection_cpp2_generated_test]: ++ suite.addTest(loader.loadTestsFromModule(test)) ++ return suite ++ ++class clean(_clean): ++ def run(self): ++ # 
Delete generated files in the code tree. ++ for (dirpath, dirnames, filenames) in os.walk("."): ++ for filename in filenames: ++ filepath = os.path.join(dirpath, filename) ++ if filepath.endswith("_pb2.py") or filepath.endswith(".pyc") or \ ++ filepath.endswith(".so") or filepath.endswith(".o") or \ ++ filepath.endswith('google/protobuf/compiler/__init__.py'): ++ os.remove(filepath) ++ # _clean is an old-style class, so super() doesn't work. ++ _clean.run(self) ++ ++class build_py(_build_py): ++ def run(self): ++ # Generate necessary .proto file if it doesn't exist. ++ generate_proto("../src/google/protobuf/descriptor.proto") ++ generate_proto("../src/google/protobuf/compiler/plugin.proto") ++ GenerateUnittestProtos() ++ ++ # Make sure google.protobuf/** are valid packages. ++ for path in ['', 'internal/', 'compiler/', 'pyext/']: ++ try: ++ open('google/protobuf/%s__init__.py' % path, 'a').close() ++ except EnvironmentError: ++ pass ++ # _build_py is an old-style class, so super() doesn't work. ++ _build_py.run(self) ++ # TODO(mrovner): Subclass to run 2to3 on some files only. ++ # Tracing what https://wiki.python.org/moin/PortingPythonToPy3k's "Approach 2" ++ # section on how to get 2to3 to run on source files during install under ++ # Python 3. This class seems like a good place to put logic that calls ++ # python3's distutils.util.run_2to3 on the subset of the files we have in our ++ # release that are subject to conversion. ++ # See code reference in previous code review. 
++ ++if __name__ == '__main__': ++ ext_module_list = [] ++ cpp_impl = '--cpp_implementation' ++ if cpp_impl in sys.argv: ++ sys.argv.remove(cpp_impl) ++ # C++ implementation extension ++ ext_module_list.append(Extension( ++ "google.protobuf.pyext._message", ++ [ "google/protobuf/pyext/descriptor.cc", ++ "google/protobuf/pyext/message.cc", ++ "google/protobuf/pyext/extension_dict.cc", ++ "google/protobuf/pyext/repeated_scalar_container.cc", ++ "google/protobuf/pyext/repeated_composite_container.cc" ], ++ define_macros=[('GOOGLE_PROTOBUF_HAS_ONEOF', '1')], ++ include_dirs = [ ".", "../src"], ++ libraries = [ "protobuf" ], ++ library_dirs = [ '../src/.libs' ], ++ )) ++ ++ setup(name = 'protobuf', ++ version = '2.6.1', ++ packages = [ 'google' ], ++ namespace_packages = [ 'google' ], ++ test_suite = 'setup.MakeTestSuite', ++ google_test_dir = "google/protobuf/internal", ++ # Must list modules explicitly so that we don't install tests. ++ py_modules = [ ++ 'google.protobuf.internal.api_implementation', ++ 'google.protobuf.internal.containers', ++ 'google.protobuf.internal.cpp_message', ++ 'google.protobuf.internal.decoder', ++ 'google.protobuf.internal.encoder', ++ 'google.protobuf.internal.enum_type_wrapper', ++ 'google.protobuf.internal.message_listener', ++ 'google.protobuf.internal.python_message', ++ 'google.protobuf.internal.type_checkers', ++ 'google.protobuf.internal.wire_format', ++ 'google.protobuf.descriptor', ++ 'google.protobuf.descriptor_pb2', ++ 'google.protobuf.compiler.plugin_pb2', ++ 'google.protobuf.message', ++ 'google.protobuf.descriptor_database', ++ 'google.protobuf.descriptor_pool', ++ 'google.protobuf.message_factory', ++ 'google.protobuf.pyext.cpp_message', ++ 'google.protobuf.reflection', ++ 'google.protobuf.service', ++ 'google.protobuf.service_reflection', ++ 'google.protobuf.symbol_database', ++ 'google.protobuf.text_encoding', ++ 'google.protobuf.text_format'], ++ cmdclass = { 'clean': clean, 'build_py': build_py }, ++ install_requires = 
['setuptools'],
++        setup_requires = ['google-apputils'],
++        ext_modules = ext_module_list,
++        url = 'https://developers.google.com/protocol-buffers/',
++        maintainer = maintainer_email,
++        maintainer_email = 'protobuf@googlegroups.com',
++        license = 'New BSD License',
++        description = 'Protocol Buffers',
++        long_description =
++        "Protocol Buffers are Google's data interchange format.",
++        )
+--- /dev/null
++++ protobuf-2.6.1/python3/stubout.py
+@@ -0,0 +1,140 @@
++#!/usr/bin/python2.4
++#
++# Copyright 2008 Google Inc.
++#
++# Licensed under the Apache License, Version 2.0 (the "License");
++# you may not use this file except in compliance with the License.
++# You may obtain a copy of the License at
++#
++# http://www.apache.org/licenses/LICENSE-2.0
++#
++# Unless required by applicable law or agreed to in writing, software
++# distributed under the License is distributed on an "AS IS" BASIS,
++# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
++# See the License for the specific language governing permissions and
++# limitations under the License.
++
++# This file is used for testing. The original is at:
++# http://code.google.com/p/pymox/
++import inspect
++class StubOutForTesting:
++  """Sample Usage:
++     You want os.path.exists() to always return true during testing.
++
++     stubs = StubOutForTesting()
++     stubs.Set(os.path, 'exists', lambda x: 1)
++     ...
++     stubs.UnsetAll()
++
++     The above changes os.path.exists into a lambda that returns 1.  Once
++     the ... part of the code finishes, the UnsetAll() looks up the old value
++     of os.path.exists and restores it.
++
++  """
++  def __init__(self):
++    self.cache = []
++    self.stubs = []
++
++  def __del__(self):
++    self.SmartUnsetAll()
++    self.UnsetAll()
++
++  def SmartSet(self, obj, attr_name, new_attr):
++    """Replace obj.attr_name with new_attr. This method is smart and works
++    at the module, class, and instance level while preserving proper
++    inheritance. 
It will not stub out C types however unless that has been
++    explicitly allowed by the type.
++
++    This method supports the case where attr_name is a staticmethod or a
++    classmethod of obj.
++
++    Notes:
++      - If obj is an instance, then it is its class that will actually be
++        stubbed. Note that the method Set() does not do that: if obj is
++        an instance, it (and not its class) will be stubbed.
++      - The stubbing is using the builtin getattr and setattr. So, the __get__
++        and __set__ will be called when stubbing (TODO: A better idea would
++        probably be to manipulate obj.__dict__ instead of getattr() and
++        setattr()).
++
++    Raises AttributeError if the attribute cannot be found.
++    """
++    if (inspect.ismodule(obj) or
++        (not inspect.isclass(obj) and attr_name in obj.__dict__)):
++      orig_obj = obj
++      orig_attr = getattr(obj, attr_name)
++
++    else:
++      if not inspect.isclass(obj):
++        mro = list(inspect.getmro(obj.__class__))
++      else:
++        mro = list(inspect.getmro(obj))
++
++      mro.reverse()
++
++      orig_attr = None
++
++      for cls in mro:
++        try:
++          orig_obj = cls
++          orig_attr = getattr(obj, attr_name)
++        except AttributeError:
++          continue
++
++    if orig_attr is None:
++      raise AttributeError("Attribute not found.")
++
++    # Calling getattr() on a staticmethod transforms it to a 'normal' function.
++    # We need to ensure that we put it back as a staticmethod.
++    old_attribute = obj.__dict__.get(attr_name)
++    if old_attribute is not None and isinstance(old_attribute, staticmethod):
++      orig_attr = staticmethod(orig_attr)
++
++    self.stubs.append((orig_obj, attr_name, orig_attr))
++    setattr(orig_obj, attr_name, new_attr)
++
++  def SmartUnsetAll(self):
++    """Reverses all the SmartSet() calls, restoring things to their original
++    definition. It's okay to call SmartUnsetAll() repeatedly, as later calls
++    have no effect if no SmartSet() calls have been made. 
++ ++ """ ++ self.stubs.reverse() ++ ++ for args in self.stubs: ++ setattr(*args) ++ ++ self.stubs = [] ++ ++ def Set(self, parent, child_name, new_child): ++ """Replace child_name's old definition with new_child, in the context ++ of the given parent. The parent could be a module when the child is a ++ function at module scope. Or the parent could be a class when a class' ++ method is being replaced. The named child is set to new_child, while ++ the prior definition is saved away for later, when UnsetAll() is called. ++ ++ This method supports the case where child_name is a staticmethod or a ++ classmethod of parent. ++ """ ++ old_child = getattr(parent, child_name) ++ ++ old_attribute = parent.__dict__.get(child_name) ++ if old_attribute is not None and isinstance(old_attribute, staticmethod): ++ old_child = staticmethod(old_child) ++ ++ self.cache.append((parent, old_child, child_name)) ++ setattr(parent, child_name, new_child) ++ ++ def UnsetAll(self): ++ """Reverses all the Set() calls, restoring things to their original ++ definition. Its okay to call UnsetAll() repeatedly, as later calls have ++ no effect if no Set() calls have been made. 
++ ++ """ ++ # Undo calls to Set() in reverse order, in case Set() was called on the ++ # same arguments repeatedly (want the original call to be last one undone) ++ self.cache.reverse() ++ ++ for (parent, old_child, child_name) in self.cache: ++ setattr(parent, child_name, old_child) ++ self.cache = [] diff -Nru protobuf-2.6.1/debian/patches/fix-long-int-bugs.patch protobuf-2.6.1/debian/patches/fix-long-int-bugs.patch --- protobuf-2.6.1/debian/patches/fix-long-int-bugs.patch 1970-01-01 00:00:00.000000000 +0000 +++ protobuf-2.6.1/debian/patches/fix-long-int-bugs.patch 2018-01-10 12:38:31.000000000 +0000 @@ -0,0 +1,114 @@ +Description: Fixing some long/int bugs +Origin: backport, https://github.com/google/protobuf/commit/fe7d9379df3ce7c951bc0652a451413cff02382a +Bug-Ubuntu: https://bugs.launchpad.net/ubuntu/+source/protobuf/+bug/1735160 +Last-Update: 2018-01-10 + +--- protobuf-2.6.1.orig/python/google/protobuf/internal/decoder.py ++++ protobuf-2.6.1/python/google/protobuf/internal/decoder.py +@@ -86,6 +86,9 @@ + + import six + ++if six.PY3: ++ long = int ++ + from google.protobuf.internal import encoder + from google.protobuf.internal import wire_format + from google.protobuf import message +@@ -157,8 +160,8 @@ + # alternate implementations where the distinction is more significant + # (e.g. the C++ implementation) simpler. + +-_DecodeVarint = _VarintDecoder((1 << 64) - 1, int) +-_DecodeSignedVarint = _SignedVarintDecoder((1 << 64) - 1, int) ++_DecodeVarint = _VarintDecoder((1 << 64) - 1, long) ++_DecodeSignedVarint = _SignedVarintDecoder((1 << 64) - 1, long) + + # Use these versions for values which must be limited to 32 bits. 
+ _DecodeVarint32 = _VarintDecoder((1 << 32) - 1, int) +--- protobuf-2.6.1.orig/python/google/protobuf/internal/reflection_test.py ++++ protobuf-2.6.1/python/google/protobuf/internal/reflection_test.py +@@ -621,17 +621,17 @@ + TestGetAndDeserialize('optional_int32', 1, int) + TestGetAndDeserialize('optional_int32', 1 << 30, int) + TestGetAndDeserialize('optional_uint32', 1 << 30, int) ++ try: ++ integer_64 = long ++ except NameError: # Python3 ++ integer_64 = int + if struct.calcsize('L') == 4: + # Python only has signed ints, so 32-bit python can't fit an uint32 + # in an int. +- TestGetAndDeserialize('optional_uint32', 1 << 31, int) ++ TestGetAndDeserialize('optional_uint32', 1 << 31, long) + else: + # 64-bit python can fit uint32 inside an int + TestGetAndDeserialize('optional_uint32', 1 << 31, int) +- try: +- integer_64 = long +- except NameError: # Python3 +- integer_64 = int + TestGetAndDeserialize('optional_int64', 1 << 30, integer_64) + TestGetAndDeserialize('optional_int64', 1 << 60, integer_64) + TestGetAndDeserialize('optional_uint64', 1 << 30, integer_64) +--- protobuf-2.6.1.orig/python/google/protobuf/internal/type_checkers.py ++++ protobuf-2.6.1/python/google/protobuf/internal/type_checkers.py +@@ -49,6 +49,9 @@ + + import six + ++if six.PY3: ++ long = int ++ + from google.protobuf.internal import decoder + from google.protobuf.internal import encoder + from google.protobuf.internal import wire_format +@@ -181,13 +184,13 @@ + class Int64ValueChecker(IntValueChecker): + _MIN = -(1 << 63) + _MAX = (1 << 63) - 1 +- _TYPE = int ++ _TYPE = long + + + class Uint64ValueChecker(IntValueChecker): + _MIN = 0 + _MAX = (1 << 64) - 1 +- _TYPE = int ++ _TYPE = long + + + # Type-checkers for all scalar CPPTYPEs. 
+@@ -197,9 +200,9 @@ + _FieldDescriptor.CPPTYPE_UINT32: Uint32ValueChecker(), + _FieldDescriptor.CPPTYPE_UINT64: Uint64ValueChecker(), + _FieldDescriptor.CPPTYPE_DOUBLE: TypeChecker( +- float, int, int), ++ float, int, long), + _FieldDescriptor.CPPTYPE_FLOAT: TypeChecker( +- float, int, int), ++ float, int, long), + _FieldDescriptor.CPPTYPE_BOOL: TypeChecker(bool, int), + _FieldDescriptor.CPPTYPE_STRING: TypeChecker(bytes), + } +--- protobuf-2.6.1.orig/python/google/protobuf/text_format.py ++++ protobuf-2.6.1/python/google/protobuf/text_format.py +@@ -39,6 +39,9 @@ + + import six + ++if six.PY3: ++ long = int ++ + from google.protobuf.internal import type_checkers + from google.protobuf import descriptor + from google.protobuf import text_encoding +@@ -772,7 +775,7 @@ + # alternate implementations where the distinction is more significant + # (e.g. the C++ implementation) simpler. + if is_long: +- result = int(text, 0) ++ result = long(text, 0) + else: + result = int(text, 0) + except ValueError: diff -Nru protobuf-2.6.1/debian/patches/python-modernize.patch protobuf-2.6.1/debian/patches/python-modernize.patch --- protobuf-2.6.1/debian/patches/python-modernize.patch 1970-01-01 00:00:00.000000000 +0000 +++ protobuf-2.6.1/debian/patches/python-modernize.patch 2018-01-10 12:33:08.000000000 +0000 @@ -0,0 +1,1088 @@ +Description: Prepare for Python2-Python3 straddle. +Origin: backport, https://github.com/google/protobuf/commit/f336d4b7a5c1d369ed508e513d482c885705e939 +Bug-Ubuntu: https://bugs.launchpad.net/ubuntu/+source/protobuf/+bug/1735160 +Last-Update: 2018-01-10 + +--- protobuf-2.6.1.orig/python/google/protobuf/descriptor.py ++++ protobuf-2.6.1/python/google/protobuf/descriptor.py +@@ -28,8 +28,6 @@ + # (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +-# Needs to stay compatible with Python 2.5 due to GAE. +-# + # Copyright 2007 Google Inc. All Rights Reserved. 
+ + """Descriptors essentially contain exactly the information found in a .proto +@@ -846,4 +844,4 @@ + + desc_name = '.'.join(full_message_name) + return Descriptor(desc_proto.name, desc_name, None, None, fields, +- nested_types.values(), enum_types.values(), []) ++ list(nested_types.values()), list(enum_types.values()), []) +--- protobuf-2.6.1.orig/python/google/protobuf/descriptor_pool.py ++++ protobuf-2.6.1/python/google/protobuf/descriptor_pool.py +@@ -57,8 +57,6 @@ + + __author__ = 'matthewtoia@google.com (Matt Toia)' + +-import sys +- + from google.protobuf import descriptor + from google.protobuf import descriptor_database + from google.protobuf import text_encoding +@@ -175,8 +173,7 @@ + + try: + file_proto = self._internal_db.FindFileByName(file_name) +- except KeyError: +- _, error, _ = sys.exc_info() #PY25 compatible for GAE. ++ except KeyError as error: + if self._descriptor_db: + file_proto = self._descriptor_db.FindFileByName(file_name) + else: +@@ -211,8 +208,7 @@ + + try: + file_proto = self._internal_db.FindFileContainingSymbol(symbol) +- except KeyError: +- _, error, _ = sys.exc_info() #PY25 compatible for GAE. ++ except KeyError as error: + if self._descriptor_db: + file_proto = self._descriptor_db.FindFileContainingSymbol(symbol) + else: +@@ -282,9 +278,9 @@ + # file proto. 
+ for dependency in built_deps: + scope.update(self._ExtractSymbols( +- dependency.message_types_by_name.values())) ++ list(dependency.message_types_by_name.values()))) + scope.update((_PrefixWithDot(enum.full_name), enum) +- for enum in dependency.enum_types_by_name.values()) ++ for enum in list(dependency.enum_types_by_name.values())) + + for message_type in file_proto.message_type: + message_desc = self._ConvertMessageDescriptor( +--- protobuf-2.6.1.orig/python/google/protobuf/internal/cpp_message.py ++++ protobuf-2.6.1/python/google/protobuf/internal/cpp_message.py +@@ -34,8 +34,12 @@ + + __author__ = 'petar@google.com (Petar Petrov)' + +-import copy_reg ++import collections + import operator ++ ++import six ++import six.moves.copyreg ++ + from google.protobuf.internal import _net_proto2___python + from google.protobuf.internal import enum_type_wrapper + from google.protobuf import message +@@ -146,7 +150,7 @@ + def __eq__(self, other): + if self is other: + return True +- if not operator.isSequenceType(other): ++ if not isinstance(other, collections.Sequence): + raise TypeError( + 'Can only compare repeated scalar fields against sequences.') + # We are presumably comparing against some other sequence type. +@@ -259,7 +263,7 @@ + index_key = lambda i: key(self[i]) + + # Sort the list of current indexes by the underlying object. +- indexes = range(len(self)) ++ indexes = list(range(len(self))) + indexes.sort(cmp=cmp, key=index_key, reverse=reverse) + + # Apply the transposition. 
+@@ -385,7 +389,7 @@ + _AddInitMethod(message_descriptor, cls) + _AddMessageMethods(message_descriptor, cls) + _AddPropertiesForExtensions(message_descriptor, cls) +- copy_reg.pickle(cls, lambda obj: (cls, (), obj.__getstate__())) ++ six.moves.copyreg.pickle(cls, lambda obj: (cls, (), obj.__getstate__())) + + + def _AddDescriptors(message_descriptor, dictionary): +@@ -400,7 +404,7 @@ + dictionary['__descriptors'][field.name] = GetFieldDescriptor( + field.full_name) + +- dictionary['__slots__'] = list(dictionary['__descriptors'].iterkeys()) + [ ++ dictionary['__slots__'] = list(dictionary['__descriptors'].keys()) + [ + '_cmsg', '_owner', '_composite_fields', 'Extensions', '_HACK_REFCOUNTS'] + + +@@ -420,7 +424,7 @@ + def _AddClassAttributesForNestedExtensions(message_descriptor, dictionary): + """Adds class attributes for the nested extensions.""" + extension_dict = message_descriptor.extensions_by_name +- for extension_name, extension_field in extension_dict.iteritems(): ++ for extension_name, extension_field in extension_dict.items(): + assert extension_name not in dictionary + dictionary[extension_name] = extension_field + +@@ -474,7 +478,7 @@ + self._HACK_REFCOUNTS = self + self._composite_fields = {} + +- for field_name, field_value in kwargs.iteritems(): ++ for field_name, field_value in kwargs.items(): + field_cdescriptor = self.__descriptors.get(field_name, None) + if not field_cdescriptor: + raise ValueError('Protocol message has no "%s" field.' % field_name) +@@ -538,7 +542,7 @@ + + def Clear(self): + cmessages_to_release = [] +- for field_name, child_field in self._composite_fields.iteritems(): ++ for field_name, child_field in self._composite_fields.items(): + child_cdescriptor = self.__descriptors[field_name] + # TODO(anuraag): Support clearing repeated message fields as well. 
+ if (child_cdescriptor.label != _LABEL_REPEATED and +@@ -631,7 +635,7 @@ + return text_format.MessageToString(self, as_utf8=True).decode('utf-8') + + # Attach the local methods to the message class. +- for key, value in locals().copy().iteritems(): ++ for key, value in locals().copy().items(): + if key not in ('key', 'value', '__builtins__', '__name__', '__doc__'): + setattr(cls, key, value) + +@@ -658,6 +662,6 @@ + def _AddPropertiesForExtensions(message_descriptor, cls): + """Adds properties for all fields in this protocol message type.""" + extension_dict = message_descriptor.extensions_by_name +- for extension_name, extension_field in extension_dict.iteritems(): ++ for extension_name, extension_field in extension_dict.items(): + constant_name = extension_name.upper() + '_FIELD_NUMBER' + setattr(cls, constant_name, extension_field.number) +--- protobuf-2.6.1.orig/python/google/protobuf/internal/decoder.py ++++ protobuf-2.6.1/python/google/protobuf/internal/decoder.py +@@ -28,8 +28,6 @@ + # (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +-#PY25 compatible for GAE. +-# + # Copyright 2009 Google Inc. All Rights Reserved. + + """Code for decoding protocol buffer primitives. +@@ -85,8 +83,9 @@ + __author__ = 'kenton@google.com (Kenton Varda)' + + import struct +-import sys ##PY25 +-_PY2 = sys.version_info[0] < 3 ##PY25 ++ ++import six ++ + from google.protobuf.internal import encoder + from google.protobuf.internal import wire_format + from google.protobuf import message +@@ -114,14 +113,11 @@ + decoder returns a (value, new_pos) pair. 
+ """ + +- local_ord = ord +- py2 = _PY2 ##PY25 +-##!PY25 py2 = str is bytes + def DecodeVarint(buffer, pos): + result = 0 + shift = 0 + while 1: +- b = local_ord(buffer[pos]) if py2 else buffer[pos] ++ b = six.indexbytes(buffer, pos) + result |= ((b & 0x7f) << shift) + pos += 1 + if not (b & 0x80): +@@ -137,14 +133,11 @@ + def _SignedVarintDecoder(mask, result_type): + """Like _VarintDecoder() but decodes signed values.""" + +- local_ord = ord +- py2 = _PY2 ##PY25 +-##!PY25 py2 = str is bytes + def DecodeVarint(buffer, pos): + result = 0 + shift = 0 + while 1: +- b = local_ord(buffer[pos]) if py2 else buffer[pos] ++ b = six.indexbytes(buffer, pos) + result |= ((b & 0x7f) << shift) + pos += 1 + if not (b & 0x80): +@@ -164,8 +157,8 @@ + # alternate implementations where the distinction is more significant + # (e.g. the C++ implementation) simpler. + +-_DecodeVarint = _VarintDecoder((1 << 64) - 1, long) +-_DecodeSignedVarint = _SignedVarintDecoder((1 << 64) - 1, long) ++_DecodeVarint = _VarintDecoder((1 << 64) - 1, int) ++_DecodeSignedVarint = _SignedVarintDecoder((1 << 64) - 1, int) + + # Use these versions for values which must be limited to 32 bits. + _DecodeVarint32 = _VarintDecoder((1 << 32) - 1, int) +@@ -183,10 +176,8 @@ + use that, but not in Python. + """ + +- py2 = _PY2 ##PY25 +-##!PY25 py2 = str is bytes + start = pos +- while (ord(buffer[pos]) if py2 else buffer[pos]) & 0x80: ++ while six.indexbytes(buffer, pos) & 0x80: + pos += 1 + pos += 1 + return (buffer[start:pos], pos) +@@ -301,7 +292,6 @@ + """ + + local_unpack = struct.unpack +- b = (lambda x:x) if _PY2 else lambda x:x.encode('latin1') ##PY25 + + def InnerDecode(buffer, pos): + # We expect a 32-bit value in little-endian byte order. Bit 1 is the sign +@@ -312,17 +302,12 @@ + # If this value has all its exponent bits set, then it's non-finite. + # In Python 2.4, struct.unpack will convert it to a finite 64-bit value. + # To avoid that, we parse it specially. 
+- if ((float_bytes[3:4] in b('\x7F\xFF')) ##PY25 +-##!PY25 if ((float_bytes[3:4] in b'\x7F\xFF') +- and (float_bytes[2:3] >= b('\x80'))): ##PY25 +-##!PY25 and (float_bytes[2:3] >= b'\x80')): ++ if (float_bytes[3:4] in b'\x7F\xFF' and float_bytes[2:3] >= b'\x80'): + # If at least one significand bit is set... +- if float_bytes[0:3] != b('\x00\x00\x80'): ##PY25 +-##!PY25 if float_bytes[0:3] != b'\x00\x00\x80': ++ if float_bytes[0:3] != b'\x00\x00\x80': + return (_NAN, new_pos) + # If sign bit is set... +- if float_bytes[3:4] == b('\xFF'): ##PY25 +-##!PY25 if float_bytes[3:4] == b'\xFF': ++ if float_bytes[3:4] == b'\xFF': + return (_NEG_INF, new_pos) + return (_POS_INF, new_pos) + +@@ -341,7 +326,6 @@ + """ + + local_unpack = struct.unpack +- b = (lambda x:x) if _PY2 else lambda x:x.encode('latin1') ##PY25 + + def InnerDecode(buffer, pos): + # We expect a 64-bit value in little-endian byte order. Bit 1 is the sign +@@ -352,12 +336,9 @@ + # If this value has all its exponent bits set and at least one significand + # bit set, it's not a number. In Python 2.4, struct.unpack will treat it + # as inf or -inf. To avoid that, we treat it specially. 
+-##!PY25 if ((double_bytes[7:8] in b'\x7F\xFF') +-##!PY25 and (double_bytes[6:7] >= b'\xF0') +-##!PY25 and (double_bytes[0:7] != b'\x00\x00\x00\x00\x00\x00\xF0')): +- if ((double_bytes[7:8] in b('\x7F\xFF')) ##PY25 +- and (double_bytes[6:7] >= b('\xF0')) ##PY25 +- and (double_bytes[0:7] != b('\x00\x00\x00\x00\x00\x00\xF0'))): ##PY25 ++ if ((double_bytes[7:8] in b'\x7F\xFF') ++ and (double_bytes[6:7] >= b'\xF0') ++ and (double_bytes[0:7] != b'\x00\x00\x00\x00\x00\x00\xF0')): + return (_NAN, new_pos) + + # Note that we expect someone up-stack to catch struct.error and convert +@@ -480,12 +461,12 @@ + """Returns a decoder for a string field.""" + + local_DecodeVarint = _DecodeVarint +- local_unicode = unicode ++ local_unicode = six.text_type + + def _ConvertToUnicode(byte_str): + try: + return local_unicode(byte_str, 'utf-8') +- except UnicodeDecodeError, e: ++ except UnicodeDecodeError as e: + # add more information to the error message and re-raise it. + e.reason = '%s in field: %s' % (e, key.full_name) + raise +--- protobuf-2.6.1.orig/python/google/protobuf/internal/encoder.py ++++ protobuf-2.6.1/python/google/protobuf/internal/encoder.py +@@ -28,8 +28,6 @@ + # (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +-#PY25 compatible for GAE. +-# + # Copyright 2009 Google Inc. All Rights Reserved. + + """Code for encoding protocol message primitives. 
+@@ -71,8 +69,9 @@ + __author__ = 'kenton@google.com (Kenton Varda)' + + import struct +-import sys ##PY25 +-_PY2 = sys.version_info[0] < 3 ##PY25 ++ ++import six ++ + from google.protobuf.internal import wire_format + + +@@ -346,16 +345,14 @@ + def _VarintEncoder(): + """Return an encoder for a basic varint value (does not include tag).""" + +- local_chr = _PY2 and chr or (lambda x: bytes((x,))) ##PY25 +-##!PY25 local_chr = chr if bytes is str else lambda x: bytes((x,)) + def EncodeVarint(write, value): + bits = value & 0x7f + value >>= 7 + while value: +- write(local_chr(0x80|bits)) ++ write(six.int2byte(0x80|bits)) + bits = value & 0x7f + value >>= 7 +- return write(local_chr(bits)) ++ return write(six.int2byte(bits)) + + return EncodeVarint + +@@ -364,18 +361,16 @@ + """Return an encoder for a basic signed varint value (does not include + tag).""" + +- local_chr = _PY2 and chr or (lambda x: bytes((x,))) ##PY25 +-##!PY25 local_chr = chr if bytes is str else lambda x: bytes((x,)) + def EncodeSignedVarint(write, value): + if value < 0: + value += (1 << 64) + bits = value & 0x7f + value >>= 7 + while value: +- write(local_chr(0x80|bits)) ++ write(six.int2byte(0x80|bits)) + bits = value & 0x7f + value >>= 7 +- return write(local_chr(bits)) ++ return write(six.int2byte(bits)) + + return EncodeSignedVarint + +@@ -390,8 +385,7 @@ + + pieces = [] + _EncodeVarint(pieces.append, value) +- return "".encode("latin1").join(pieces) ##PY25 +-##!PY25 return b"".join(pieces) ++ return b"".join(pieces) + + + def TagBytes(field_number, wire_type): +@@ -529,33 +523,26 @@ + format: The format string to pass to struct.pack(). + """ + +- b = _PY2 and (lambda x:x) or (lambda x:x.encode('latin1')) ##PY25 + value_size = struct.calcsize(format) + if value_size == 4: + def EncodeNonFiniteOrRaise(write, value): + # Remember that the serialized form uses little-endian byte order. 
+ if value == _POS_INF: +- write(b('\x00\x00\x80\x7F')) ##PY25 +-##!PY25 write(b'\x00\x00\x80\x7F') ++ write(b'\x00\x00\x80\x7F') + elif value == _NEG_INF: +- write(b('\x00\x00\x80\xFF')) ##PY25 +-##!PY25 write(b'\x00\x00\x80\xFF') ++ write(b'\x00\x00\x80\xFF') + elif value != value: # NaN +- write(b('\x00\x00\xC0\x7F')) ##PY25 +-##!PY25 write(b'\x00\x00\xC0\x7F') ++ write(b'\x00\x00\xC0\x7F') + else: + raise + elif value_size == 8: + def EncodeNonFiniteOrRaise(write, value): + if value == _POS_INF: +- write(b('\x00\x00\x00\x00\x00\x00\xF0\x7F')) ##PY25 +-##!PY25 write(b'\x00\x00\x00\x00\x00\x00\xF0\x7F') ++ write(b'\x00\x00\x00\x00\x00\x00\xF0\x7F') + elif value == _NEG_INF: +- write(b('\x00\x00\x00\x00\x00\x00\xF0\xFF')) ##PY25 +-##!PY25 write(b'\x00\x00\x00\x00\x00\x00\xF0\xFF') ++ write(b'\x00\x00\x00\x00\x00\x00\xF0\xFF') + elif value != value: # NaN +- write(b('\x00\x00\x00\x00\x00\x00\xF8\x7F')) ##PY25 +-##!PY25 write(b'\x00\x00\x00\x00\x00\x00\xF8\x7F') ++ write(b'\x00\x00\x00\x00\x00\x00\xF8\x7F') + else: + raise + else: +@@ -631,10 +618,8 @@ + def BoolEncoder(field_number, is_repeated, is_packed): + """Returns an encoder for a boolean field.""" + +-##!PY25 false_byte = b'\x00' +-##!PY25 true_byte = b'\x01' +- false_byte = '\x00'.encode('latin1') ##PY25 +- true_byte = '\x01'.encode('latin1') ##PY25 ++ false_byte = b'\x00' ++ true_byte = b'\x01' + if is_packed: + tag_bytes = TagBytes(field_number, wire_format.WIRETYPE_LENGTH_DELIMITED) + local_EncodeVarint = _EncodeVarint +@@ -770,8 +755,7 @@ + } + } + """ +- start_bytes = "".encode("latin1").join([ ##PY25 +-##!PY25 start_bytes = b"".join([ ++ start_bytes = b"".join([ + TagBytes(1, wire_format.WIRETYPE_START_GROUP), + TagBytes(2, wire_format.WIRETYPE_VARINT), + _VarintBytes(field_number), +--- protobuf-2.6.1.orig/python/google/protobuf/internal/generator_test.py ++++ protobuf-2.6.1/python/google/protobuf/internal/generator_test.py +@@ -294,7 +294,7 @@ + self.assertSameElements( + nested_names, + [field.name 
for field in desc.oneofs[0].fields]) +- for field_name, field_desc in desc.fields_by_name.iteritems(): ++ for field_name, field_desc in desc.fields_by_name.items(): + if field_name in nested_names: + self.assertIs(desc.oneofs[0], field_desc.containing_oneof) + else: +--- protobuf-2.6.1.orig/python/google/protobuf/internal/message_factory_test.py ++++ protobuf-2.6.1/python/google/protobuf/internal/message_factory_test.py +@@ -107,14 +107,14 @@ + self.assertContainsSubset( + ['google.protobuf.python.internal.Factory2Message', + 'google.protobuf.python.internal.Factory1Message'], +- messages.keys()) ++ list(messages.keys())) + self._ExerciseDynamicClass( + messages['google.protobuf.python.internal.Factory2Message']) + self.assertContainsSubset( + ['google.protobuf.python.internal.Factory2Message.one_more_field', + 'google.protobuf.python.internal.another_field'], +- (messages['google.protobuf.python.internal.Factory1Message'] +- ._extensions_by_name.keys())) ++ (list(messages['google.protobuf.python.internal.Factory1Message'] ++ ._extensions_by_name.keys()))) + factory_msg1 = messages['google.protobuf.python.internal.Factory1Message'] + msg1 = messages['google.protobuf.python.internal.Factory1Message']() + ext1 = factory_msg1._extensions_by_name[ +--- protobuf-2.6.1.orig/python/google/protobuf/internal/python_message.py ++++ protobuf-2.6.1/python/google/protobuf/internal/python_message.py +@@ -28,8 +28,6 @@ + # (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +-# Keep it Python2.5 compatible for GAE. +-# + # Copyright 2007 Google Inc. All Rights Reserved. + # + # This code is meant to work on Python 2.4 and above only. 
+@@ -54,19 +52,14 @@ + + __author__ = 'robinson@google.com (Will Robinson)' + ++from io import BytesIO + import sys +-if sys.version_info[0] < 3: +- try: +- from cStringIO import StringIO as BytesIO +- except ImportError: +- from StringIO import StringIO as BytesIO +- import copy_reg as copyreg +-else: +- from io import BytesIO +- import copyreg + import struct + import weakref + ++import six ++import six.moves.copyreg as copyreg ++ + # We use "as" to avoid name collisions with variables. + from google.protobuf.internal import containers + from google.protobuf.internal import decoder +@@ -237,7 +230,7 @@ + + def _AddClassAttributesForNestedExtensions(descriptor, dictionary): + extension_dict = descriptor.extensions_by_name +- for extension_name, extension_field in extension_dict.iteritems(): ++ for extension_name, extension_field in extension_dict.items(): + assert extension_name not in dictionary + dictionary[extension_name] = extension_field + +@@ -323,7 +316,7 @@ + self._is_present_in_parent = False + self._listener = message_listener_mod.NullMessageListener() + self._listener_for_children = _Listener(self) +- for field_name, field_value in kwargs.iteritems(): ++ for field_name, field_value in kwargs.items(): + field = _GetFieldByName(message_descriptor, field_name) + if field is None: + raise TypeError("%s() got an unexpected keyword argument '%s'" % +@@ -546,7 +539,7 @@ + def _AddPropertiesForExtensions(descriptor, cls): + """Adds properties for all fields in this protocol message type.""" + extension_dict = descriptor.extensions_by_name +- for extension_name, extension_field in extension_dict.iteritems(): ++ for extension_name, extension_field in extension_dict.items(): + constant_name = extension_name.upper() + "_FIELD_NUMBER" + setattr(cls, constant_name, extension_field.number) + +@@ -601,7 +594,7 @@ + """Helper for _AddMessageMethods().""" + + def ListFields(self): +- all_fields = [item for item in self._fields.iteritems() if _IsPresent(item)] ++ 
all_fields = [item for item in self._fields.items() if _IsPresent(item)] + all_fields.sort(key = lambda item: item[0].number) + return all_fields + +@@ -845,7 +838,7 @@ + except (IndexError, TypeError): + # Now ord(buf[p:p+1]) == ord('') gets TypeError. + raise message_mod.DecodeError('Truncated message.') +- except struct.error, e: ++ except struct.error as e: + raise message_mod.DecodeError(e) + return length # Return this for legacy reasons. + cls.MergeFromString = MergeFromString +@@ -945,7 +938,7 @@ + name = field.name + + if field.label == _FieldDescriptor.LABEL_REPEATED: +- for i in xrange(len(value)): ++ for i in range(len(value)): + element = value[i] + prefix = "%s[%d]." % (name, i) + sub_errors = element.FindInitializationErrors() +@@ -975,7 +968,7 @@ + + fields = self._fields + +- for field, value in msg._fields.iteritems(): ++ for field, value in msg._fields.items(): + if field.label == LABEL_REPEATED: + field_value = fields.get(field) + if field_value is None: +--- protobuf-2.6.1.orig/python/google/protobuf/internal/reflection_test.py ++++ protobuf-2.6.1/python/google/protobuf/internal/reflection_test.py +@@ -42,6 +42,8 @@ + import operator + import struct + ++import six ++ + from google.apputils import basetest + from google.protobuf import unittest_import_pb2 + from google.protobuf import unittest_mset_pb2 +@@ -469,7 +471,7 @@ + proto.repeated_string.extend(['foo', 'bar']) + proto.repeated_string.extend([]) + proto.repeated_string.append('baz') +- proto.repeated_string.extend(str(x) for x in xrange(2)) ++ proto.repeated_string.extend(str(x) for x in range(2)) + proto.optional_int32 = 21 + proto.repeated_bool # Access but don't set anything; should not be listed. + self.assertEqual( +@@ -622,14 +624,18 @@ + if struct.calcsize('L') == 4: + # Python only has signed ints, so 32-bit python can't fit an uint32 + # in an int. 
+- TestGetAndDeserialize('optional_uint32', 1 << 31, long) ++ TestGetAndDeserialize('optional_uint32', 1 << 31, int) + else: + # 64-bit python can fit uint32 inside an int + TestGetAndDeserialize('optional_uint32', 1 << 31, int) +- TestGetAndDeserialize('optional_int64', 1 << 30, long) +- TestGetAndDeserialize('optional_int64', 1 << 60, long) +- TestGetAndDeserialize('optional_uint64', 1 << 30, long) +- TestGetAndDeserialize('optional_uint64', 1 << 60, long) ++ try: ++ integer_64 = long ++ except NameError: # Python3 ++ integer_64 = int ++ TestGetAndDeserialize('optional_int64', 1 << 30, integer_64) ++ TestGetAndDeserialize('optional_int64', 1 << 60, integer_64) ++ TestGetAndDeserialize('optional_uint64', 1 << 30, integer_64) ++ TestGetAndDeserialize('optional_uint64', 1 << 60, integer_64) + + def testSingleScalarBoundsChecking(self): + def TestMinAndMaxIntegers(field_name, expected_min, expected_max): +@@ -755,18 +761,18 @@ + + def testEnum_KeysAndValues(self): + self.assertEqual(['FOREIGN_FOO', 'FOREIGN_BAR', 'FOREIGN_BAZ'], +- unittest_pb2.ForeignEnum.keys()) ++ list(unittest_pb2.ForeignEnum.keys())) + self.assertEqual([4, 5, 6], +- unittest_pb2.ForeignEnum.values()) ++ list(unittest_pb2.ForeignEnum.values())) + self.assertEqual([('FOREIGN_FOO', 4), ('FOREIGN_BAR', 5), + ('FOREIGN_BAZ', 6)], +- unittest_pb2.ForeignEnum.items()) ++ list(unittest_pb2.ForeignEnum.items())) + + proto = unittest_pb2.TestAllTypes() +- self.assertEqual(['FOO', 'BAR', 'BAZ', 'NEG'], proto.NestedEnum.keys()) +- self.assertEqual([1, 2, 3, -1], proto.NestedEnum.values()) ++ self.assertEqual(['FOO', 'BAR', 'BAZ', 'NEG'], list(proto.NestedEnum.keys())) ++ self.assertEqual([1, 2, 3, -1], list(proto.NestedEnum.values())) + self.assertEqual([('FOO', 1), ('BAR', 2), ('BAZ', 3), ('NEG', -1)], +- proto.NestedEnum.items()) ++ list(proto.NestedEnum.items())) + + def testRepeatedScalars(self): + proto = unittest_pb2.TestAllTypes() +@@ -805,7 +811,7 @@ + self.assertEqual([5, 25, 20, 15, 30], 
proto.repeated_int32[:]) + + # Test slice assignment with an iterator +- proto.repeated_int32[1:4] = (i for i in xrange(3)) ++ proto.repeated_int32[1:4] = (i for i in range(3)) + self.assertEqual([5, 0, 1, 2, 30], proto.repeated_int32) + + # Test slice assignment. +@@ -1008,9 +1014,8 @@ + containing_type=None, nested_types=[], enum_types=[], + fields=[foo_field_descriptor], extensions=[], + options=descriptor_pb2.MessageOptions()) +- class MyProtoClass(message.Message): ++ class MyProtoClass(six.with_metaclass(reflection.GeneratedProtocolMessageType, message.Message)): + DESCRIPTOR = mydescriptor +- __metaclass__ = reflection.GeneratedProtocolMessageType + myproto_instance = MyProtoClass() + self.assertEqual(0, myproto_instance.foo_field) + self.assertTrue(not myproto_instance.HasField('foo_field')) +@@ -1050,14 +1055,13 @@ + new_field.label = descriptor_pb2.FieldDescriptorProto.LABEL_REPEATED + + desc = descriptor.MakeDescriptor(desc_proto) +- self.assertTrue(desc.fields_by_name.has_key('name')) +- self.assertTrue(desc.fields_by_name.has_key('year')) +- self.assertTrue(desc.fields_by_name.has_key('automatic')) +- self.assertTrue(desc.fields_by_name.has_key('price')) +- self.assertTrue(desc.fields_by_name.has_key('owners')) ++ self.assertTrue('name' in desc.fields_by_name) ++ self.assertTrue('year' in desc.fields_by_name) ++ self.assertTrue('automatic' in desc.fields_by_name) ++ self.assertTrue('price' in desc.fields_by_name) ++ self.assertTrue('owners' in desc.fields_by_name) + +- class CarMessage(message.Message): +- __metaclass__ = reflection.GeneratedProtocolMessageType ++ class CarMessage(six.with_metaclass(reflection.GeneratedProtocolMessageType, message.Message)): + DESCRIPTOR = desc + + prius = CarMessage() +@@ -1660,14 +1664,14 @@ + setattr, proto, 'optional_bytes', u'unicode object') + + # Check that the default value is of python's 'unicode' type. 
+- self.assertEqual(type(proto.optional_string), unicode) ++ self.assertEqual(type(proto.optional_string), six.text_type) + +- proto.optional_string = unicode('Testing') ++ proto.optional_string = six.text_type('Testing') + self.assertEqual(proto.optional_string, str('Testing')) + + # Assign a value of type 'str' which can be encoded in UTF-8. + proto.optional_string = str('Testing') +- self.assertEqual(proto.optional_string, unicode('Testing')) ++ self.assertEqual(proto.optional_string, six.text_type('Testing')) + + # Try to assign a 'str' value which contains bytes that aren't 7-bit ASCII. + self.assertRaises(ValueError, +@@ -1715,7 +1719,7 @@ + bytes_read = message2.MergeFromString(raw.item[0].message) + self.assertEqual(len(raw.item[0].message), bytes_read) + +- self.assertEqual(type(message2.str), unicode) ++ self.assertEqual(type(message2.str), six.text_type) + self.assertEqual(message2.str, test_utf8) + + # The pure Python API throws an exception on MergeFromString(), +@@ -1739,7 +1743,7 @@ + def testBytesInTextFormat(self): + proto = unittest_pb2.TestAllTypes(optional_bytes=b'\x00\x7f\x80\xff') + self.assertEqual(u'optional_bytes: "\\000\\177\\200\\377"\n', +- unicode(proto)) ++ six.text_type(proto)) + + def testEmptyNestedMessage(self): + proto = unittest_pb2.TestAllTypes() +@@ -2289,7 +2293,7 @@ + test_util.SetAllFields(first_proto) + serialized = first_proto.SerializeToString() + +- for truncation_point in xrange(len(serialized) + 1): ++ for truncation_point in range(len(serialized) + 1): + try: + second_proto = unittest_pb2.TestAllTypes() + unknown_fields = unittest_pb2.TestEmptyMessage() +@@ -2887,8 +2891,7 @@ + msg_descriptor = descriptor.MakeDescriptor( + file_descriptor.message_type[0]) + +- class MessageClass(message.Message): +- __metaclass__ = reflection.GeneratedProtocolMessageType ++ class MessageClass(six.with_metaclass(reflection.GeneratedProtocolMessageType, message.Message)): + DESCRIPTOR = msg_descriptor + msg = MessageClass() + msg_str = 
( +--- protobuf-2.6.1.orig/python/google/protobuf/internal/text_format_test.py ++++ protobuf-2.6.1/python/google/protobuf/internal/text_format_test.py +@@ -36,9 +36,10 @@ + + import re + ++import six ++ + from google.apputils import basetest + from google.protobuf import text_format +-from google.protobuf.internal import api_implementation + from google.protobuf.internal import test_util + from google.protobuf import unittest_pb2 + from google.protobuf import unittest_mset_pb2 +@@ -138,7 +139,7 @@ + 'repeated_string: "\\303\\274\\352\\234\\237"\n') + + def testPrintExoticUnicodeSubclass(self): +- class UnicodeSub(unicode): ++ class UnicodeSub(six.text_type): + pass + message = unittest_pb2.TestAllTypes() + message.repeated_string.append(UnicodeSub(u'\u00fc\ua71f')) +--- protobuf-2.6.1.orig/python/google/protobuf/internal/type_checkers.py ++++ protobuf-2.6.1/python/google/protobuf/internal/type_checkers.py +@@ -28,8 +28,6 @@ + # (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +-#PY25 compatible for GAE. +-# + # Copyright 2008 Google Inc. All Rights Reserved. + + """Provides type checking routines. +@@ -49,9 +47,8 @@ + + __author__ = 'robinson@google.com (Will Robinson)' + +-import sys ##PY25 +-if sys.version < '2.6': bytes = str ##PY25 +-from google.protobuf.internal import api_implementation ++import six ++ + from google.protobuf.internal import decoder + from google.protobuf.internal import encoder + from google.protobuf.internal import wire_format +@@ -111,9 +108,9 @@ + """Checker used for integer fields. 
Performs type-check and range check.""" + + def CheckValue(self, proposed_value): +- if not isinstance(proposed_value, (int, long)): ++ if not isinstance(proposed_value, six.integer_types): + message = ('%.1024r has type %s, but expected one of: %s' % +- (proposed_value, type(proposed_value), (int, long))) ++ (proposed_value, type(proposed_value), six.integer_types)) + raise TypeError(message) + if not self._MIN <= proposed_value <= self._MAX: + raise ValueError('Value out of range: %d' % proposed_value) +@@ -132,9 +129,9 @@ + self._enum_type = enum_type + + def CheckValue(self, proposed_value): +- if not isinstance(proposed_value, (int, long)): ++ if not isinstance(proposed_value, six.integer_types): + message = ('%.1024r has type %s, but expected one of: %s' % +- (proposed_value, type(proposed_value), (int, long))) ++ (proposed_value, type(proposed_value), six.integer_types)) + raise TypeError(message) + if proposed_value not in self._enum_type.values_by_number: + raise ValueError('Unknown enum value: %d' % proposed_value) +@@ -149,9 +146,9 @@ + """ + + def CheckValue(self, proposed_value): +- if not isinstance(proposed_value, (bytes, unicode)): ++ if not isinstance(proposed_value, (bytes, six.text_type)): + message = ('%.1024r has type %s, but expected one of: %s' % +- (proposed_value, type(proposed_value), (bytes, unicode))) ++ (proposed_value, type(proposed_value), (bytes, six.text_type))) + raise TypeError(message) + + # If the value is of type 'bytes' make sure that it is in 7-bit ASCII +@@ -184,13 +181,13 @@ + class Int64ValueChecker(IntValueChecker): + _MIN = -(1 << 63) + _MAX = (1 << 63) - 1 +- _TYPE = long ++ _TYPE = int + + + class Uint64ValueChecker(IntValueChecker): + _MIN = 0 + _MAX = (1 << 64) - 1 +- _TYPE = long ++ _TYPE = int + + + # Type-checkers for all scalar CPPTYPEs. 
+@@ -200,9 +197,9 @@ + _FieldDescriptor.CPPTYPE_UINT32: Uint32ValueChecker(), + _FieldDescriptor.CPPTYPE_UINT64: Uint64ValueChecker(), + _FieldDescriptor.CPPTYPE_DOUBLE: TypeChecker( +- float, int, long), ++ float, int, int), + _FieldDescriptor.CPPTYPE_FLOAT: TypeChecker( +- float, int, long), ++ float, int, int), + _FieldDescriptor.CPPTYPE_BOOL: TypeChecker(bool, int), + _FieldDescriptor.CPPTYPE_STRING: TypeChecker(bytes), + } +--- protobuf-2.6.1.orig/python/google/protobuf/message_factory.py ++++ protobuf-2.6.1/python/google/protobuf/message_factory.py +@@ -28,8 +28,6 @@ + # (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +-#PY25 compatible for GAE. +-# + # Copyright 2012 Google Inc. All Rights Reserved. + + """Provides a factory class for generating dynamic messages. +@@ -43,7 +41,6 @@ + + __author__ = 'matthewtoia@google.com (Matt Toia)' + +-import sys ##PY25 + from google.protobuf import descriptor_database + from google.protobuf import descriptor_pool + from google.protobuf import message +@@ -75,8 +72,7 @@ + """ + if descriptor.full_name not in self._classes: + descriptor_name = descriptor.name +- if sys.version_info[0] < 3: ##PY25 +-##!PY25 if str is bytes: # PY2 ++ if str is bytes: # PY2 + descriptor_name = descriptor.name.encode('ascii', 'ignore') + result_class = reflection.GeneratedProtocolMessageType( + descriptor_name, +@@ -111,7 +107,7 @@ + result = {} + for file_name in files: + file_desc = self.pool.FindFileByName(file_name) +- for name, msg in file_desc.message_types_by_name.iteritems(): ++ for name, msg in file_desc.message_types_by_name.items(): + if file_desc.package: + full_name = '.'.join([file_desc.package, name]) + else: +@@ -128,7 +124,7 @@ + # ignore the registration if the original was the same, or raise + # an error if they were different. 
+ +- for name, extension in file_desc.extensions_by_name.iteritems(): ++ for name, extension in file_desc.extensions_by_name.items(): + if extension.containing_type.full_name not in self._classes: + self.GetPrototype(extension.containing_type) + extended_class = self._classes[extension.containing_type.full_name] +--- protobuf-2.6.1.orig/python/google/protobuf/text_encoding.py ++++ protobuf-2.6.1/python/google/protobuf/text_encoding.py +@@ -27,16 +27,13 @@ + # THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + # (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +- +-#PY25 compatible for GAE. +-# + """Encoding related utilities.""" +- + import re +-import sys ##PY25 ++ ++import six + + # Lookup table for utf8 +-_cescape_utf8_to_str = [chr(i) for i in xrange(0, 256)] ++_cescape_utf8_to_str = [chr(i) for i in range(0, 256)] + _cescape_utf8_to_str[9] = r'\t' # optional escape + _cescape_utf8_to_str[10] = r'\n' # optional escape + _cescape_utf8_to_str[13] = r'\r' # optional escape +@@ -46,9 +43,9 @@ + _cescape_utf8_to_str[92] = r'\\' # necessary escape + + # Lookup table for non-utf8, with necessary escapes at (o >= 127 or o < 32) +-_cescape_byte_to_str = ([r'\%03o' % i for i in xrange(0, 32)] + +- [chr(i) for i in xrange(32, 127)] + +- [r'\%03o' % i for i in xrange(127, 256)]) ++_cescape_byte_to_str = ([r'\%03o' % i for i in range(0, 32)] + ++ [chr(i) for i in range(32, 127)] + ++ [r'\%03o' % i for i in range(127, 256)]) + _cescape_byte_to_str[9] = r'\t' # optional escape + _cescape_byte_to_str[10] = r'\n' # optional escape + _cescape_byte_to_str[13] = r'\r' # optional escape +@@ -75,7 +72,7 @@ + """ + # PY3 hack: make Ord work for str and bytes: + # //platforms/networking/data uses unicode here, hence basestring. 
+- Ord = ord if isinstance(text, basestring) else lambda x: x ++ Ord = ord if isinstance(text, six.string_types) else lambda x: x + if as_utf8: + return ''.join(_cescape_utf8_to_str[Ord(c)] for c in text) + return ''.join(_cescape_byte_to_str[Ord(c)] for c in text) +@@ -100,8 +97,7 @@ + # allow single-digit hex escapes (like '\xf'). + result = _CUNESCAPE_HEX.sub(ReplaceHex, text) + +- if sys.version_info[0] < 3: ##PY25 +-##!PY25 if str is bytes: # PY2 ++ if str is bytes: # PY2 + return result.decode('string_escape') + result = ''.join(_cescape_highbit_to_str[ord(c)] for c in result) + return (result.encode('ascii') # Make it bytes to allow decode. +--- protobuf-2.6.1.orig/python/google/protobuf/text_format.py ++++ protobuf-2.6.1/python/google/protobuf/text_format.py +@@ -28,8 +28,6 @@ + # (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +-#PY25 compatible for GAE. +-# + # Copyright 2007 Google Inc. All Rights Reserved. 
+ + """Contains routines for printing protocol messages in text format.""" +@@ -39,6 +37,8 @@ + import cStringIO + import re + ++import six ++ + from google.protobuf.internal import type_checkers + from google.protobuf import descriptor + from google.protobuf import text_encoding +@@ -189,7 +189,7 @@ + out.write(str(value)) + elif field.cpp_type == descriptor.FieldDescriptor.CPPTYPE_STRING: + out.write('\"') +- if isinstance(value, unicode): ++ if isinstance(value, six.text_type): + out_value = value.encode('utf-8') + else: + out_value = value +@@ -499,7 +499,7 @@ + def _PopLine(self): + while len(self._current_line) <= self._column: + try: +- self._current_line = self._lines.next() ++ self._current_line = next(self._lines) + except StopIteration: + self._current_line = '' + self._more_lines = False +@@ -569,7 +569,7 @@ + """ + try: + result = ParseInteger(self.token, is_signed=True, is_long=False) +- except ValueError, e: ++ except ValueError as e: + raise self._ParseError(str(e)) + self.NextToken() + return result +@@ -585,7 +585,7 @@ + """ + try: + result = ParseInteger(self.token, is_signed=False, is_long=False) +- except ValueError, e: ++ except ValueError as e: + raise self._ParseError(str(e)) + self.NextToken() + return result +@@ -601,7 +601,7 @@ + """ + try: + result = ParseInteger(self.token, is_signed=True, is_long=True) +- except ValueError, e: ++ except ValueError as e: + raise self._ParseError(str(e)) + self.NextToken() + return result +@@ -617,7 +617,7 @@ + """ + try: + result = ParseInteger(self.token, is_signed=False, is_long=True) +- except ValueError, e: ++ except ValueError as e: + raise self._ParseError(str(e)) + self.NextToken() + return result +@@ -633,7 +633,7 @@ + """ + try: + result = ParseFloat(self.token) +- except ValueError, e: ++ except ValueError as e: + raise self._ParseError(str(e)) + self.NextToken() + return result +@@ -649,7 +649,7 @@ + """ + try: + result = ParseBool(self.token) +- except ValueError, e: ++ except ValueError as 
e: + raise self._ParseError(str(e)) + self.NextToken() + return result +@@ -665,8 +665,8 @@ + """ + the_bytes = self.ConsumeByteString() + try: +- return unicode(the_bytes, 'utf-8') +- except UnicodeDecodeError, e: ++ return six.text_type(the_bytes, 'utf-8') ++ except UnicodeDecodeError as e: + raise self._StringParseError(e) + + def ConsumeByteString(self): +@@ -681,8 +681,7 @@ + the_list = [self._ConsumeSingleByteString()] + while self.token and self.token[0] in ('\'', '"'): + the_list.append(self._ConsumeSingleByteString()) +- return ''.encode('latin1').join(the_list) ##PY25 +-##!PY25 return b''.join(the_list) ++ return b''.join(the_list) + + def _ConsumeSingleByteString(self): + """Consume one token of a string literal. +@@ -700,7 +699,7 @@ + + try: + result = text_encoding.CUnescape(text[1:-1]) +- except ValueError, e: ++ except ValueError as e: + raise self._ParseError(str(e)) + self.NextToken() + return result +@@ -708,7 +707,7 @@ + def ConsumeEnum(self, field): + try: + result = ParseEnum(field, self.token) +- except ValueError, e: ++ except ValueError as e: + raise self._ParseError(str(e)) + self.NextToken() + return result +@@ -773,7 +772,7 @@ + # alternate implementations where the distinction is more significant + # (e.g. the C++ implementation) simpler. 
+ if is_long: +- result = long(text, 0) ++ result = int(text, 0) + else: + result = int(text, 0) + except ValueError: diff -Nru protobuf-2.6.1/debian/patches/series protobuf-2.6.1/debian/patches/series --- protobuf-2.6.1/debian/patches/series 2014-10-24 17:56:18.000000000 +0000 +++ protobuf-2.6.1/debian/patches/series 2018-01-10 12:45:11.000000000 +0000 @@ -1 +1,5 @@ debian-changes +python-modernize.patch +use-io-bytesio.patch +fix-long-int-bugs.patch +add-python3.patch diff -Nru protobuf-2.6.1/debian/patches/use-io-bytesio.patch protobuf-2.6.1/debian/patches/use-io-bytesio.patch --- protobuf-2.6.1/debian/patches/use-io-bytesio.patch 1970-01-01 00:00:00.000000000 +0000 +++ protobuf-2.6.1/debian/patches/use-io-bytesio.patch 2018-01-10 12:35:21.000000000 +0000 @@ -0,0 +1,36 @@ +Description: Use 'io.BytesIO' rather than 'cStringIO.StringIO'. +Origin: backport, https://github.com/google/protobuf/commit/47ee4d37c17db8e97fe5b15cf918ab56ff93bb18 +Bug-Ubuntu: https://bugs.launchpad.net/ubuntu/+source/protobuf/+bug/1735160 +Last-Update: 2018-01-10 + +--- protobuf-2.6.1.orig/python/google/protobuf/internal/encoder.py ++++ protobuf-2.6.1/python/google/protobuf/internal/encoder.py +@@ -43,7 +43,7 @@ + sizer takes a value of this field's type and computes its byte size. The + encoder takes a writer function and a value. It encodes the value into byte + strings and invokes the writer function to write those strings. Typically the +-writer function is the write() method of a cStringIO. ++writer function is the write() method of a BytesIO. + + We try to do as much work as possible when constructing the writer and the + sizer rather than when calling them. 
In particular: +--- protobuf-2.6.1.orig/python/google/protobuf/text_format.py ++++ protobuf-2.6.1/python/google/protobuf/text_format.py +@@ -34,7 +34,7 @@ + + __author__ = 'kenton@google.com (Kenton Varda)' + +-import cStringIO ++import io + import re + + import six +@@ -89,7 +89,7 @@ + Returns: + A string of the text formatted protocol buffer message. + """ +- out = cStringIO.StringIO() ++ out = io.BytesIO() + PrintMessage(message, out, as_utf8=as_utf8, as_one_line=as_one_line, + pointy_brackets=pointy_brackets, + use_index_order=use_index_order, diff -Nru protobuf-2.6.1/debian/rules protobuf-2.6.1/debian/rules --- protobuf-2.6.1/debian/rules 2015-08-26 20:37:55.000000000 +0000 +++ protobuf-2.6.1/debian/rules 2018-01-10 12:55:16.000000000 +0000 @@ -18,6 +18,9 @@ # Python build. cd python && python setup.py build --cpp_implementation + # Python3 build. + cd python3 && python3 setup.py build --cpp_implementation + override_dh_auto_build-indep: dh_auto_build --indep @@ -34,6 +37,13 @@ cd python && for python in $(shell pyversions -r); do \ $$python setup.py test --cpp_implementation; \ done + + # Python3 test. + set -e; \ + export LD_LIBRARY_PATH=$(CURDIR)/src/.libs; \ + cd python3 && for python in $(shell py3versions -r); do \ + $$python setup.py test --cpp_implementation; \ + done endif override_dh_auto_test-indep: @@ -49,6 +59,14 @@ done rm -rf python/protobuf.egg-info + # Python3 clean. + set -e; \ + cd python3 && for python in $(shell py3versions -r); do \ + $$python setup.py clean --all; \ + done + rm -rf python3/protobuf.egg-info + rm -rf python3/build + override_dh_auto_clean-indep: dh_auto_clean --indep @@ -68,6 +86,15 @@ done find $(CURDIR)/debian/python-protobuf -name 'protobuf-*-nspkg.pth' -delete + # Python3 install. 
+ set -e; \ + cd python3 && for python in $(shell py3versions -r); do \ + $$python setup.py install --cpp_implementation \ + --install-layout=deb --no-compile \ + --root=$(CURDIR)/debian/python3-protobuf; \ + done + find $(CURDIR)/debian/python3-protobuf -name 'protobuf-*-nspkg.pth' -delete + override_dh_auto_install-indep: dh_auto_install --indep