changeset 86:28704a2a7461 vendor/python-fastimport

Import python-fastimport-0.9.8
author Roy Marples <roy@marples.name>
date Tue, 19 Jan 2021 22:56:34 +0000
parents 1f5544a8870b
children 51664f5abc3a
files hgext3rd/fastimport/vendor/python_fastimport/AUTHORS hgext3rd/fastimport/vendor/python_fastimport/COPYING hgext3rd/fastimport/vendor/python_fastimport/NEWS hgext3rd/fastimport/vendor/python_fastimport/README.md hgext3rd/fastimport/vendor/python_fastimport/__init__.py hgext3rd/fastimport/vendor/python_fastimport/commands.py hgext3rd/fastimport/vendor/python_fastimport/dates.py hgext3rd/fastimport/vendor/python_fastimport/errors.py hgext3rd/fastimport/vendor/python_fastimport/helpers.py hgext3rd/fastimport/vendor/python_fastimport/parser.py hgext3rd/fastimport/vendor/python_fastimport/processor.py hgext3rd/fastimport/vendor/python_fastimport/processors/__init__.py hgext3rd/fastimport/vendor/python_fastimport/processors/filter_processor.py hgext3rd/fastimport/vendor/python_fastimport/processors/info_processor.py hgext3rd/fastimport/vendor/python_fastimport/processors/query_processor.py hgext3rd/fastimport/vendor/python_fastimport/reftracker.py hgext3rd/fastimport/vendor/python_fastimport/tests/__init__.py hgext3rd/fastimport/vendor/python_fastimport/tests/test_commands.py hgext3rd/fastimport/vendor/python_fastimport/tests/test_dates.py hgext3rd/fastimport/vendor/python_fastimport/tests/test_errors.py hgext3rd/fastimport/vendor/python_fastimport/tests/test_filter_processor.py hgext3rd/fastimport/vendor/python_fastimport/tests/test_helpers.py hgext3rd/fastimport/vendor/python_fastimport/tests/test_info_processor.py hgext3rd/fastimport/vendor/python_fastimport/tests/test_parser.py
diffstat 23 files changed, 5337 insertions(+), 0 deletions(-) [+]
line wrap: on
line diff
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/hgext3rd/fastimport/vendor/python_fastimport/AUTHORS	Tue Jan 19 22:56:34 2021 +0000
@@ -0,0 +1,6 @@
+Ian Clatworthy wrote bzr-fastimport, which included a lot of generic code for
+parsing and generating fastimport streams. Jelmer Vernooij split it out
+into its own separate package (python-fastimport) so it can be used by other
+projects, and is its current maintainer.
+
+Félix Mattrat ported python-fastimport to Python 3.
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/hgext3rd/fastimport/vendor/python_fastimport/COPYING	Tue Jan 19 22:56:34 2021 +0000
@@ -0,0 +1,339 @@
+		    GNU GENERAL PUBLIC LICENSE
+		       Version 2, June 1991
+
+ Copyright (C) 1989, 1991 Free Software Foundation, Inc.,
+ 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ Everyone is permitted to copy and distribute verbatim copies
+ of this license document, but changing it is not allowed.
+
+			    Preamble
+
+  The licenses for most software are designed to take away your
+freedom to share and change it.  By contrast, the GNU General Public
+License is intended to guarantee your freedom to share and change free
+software--to make sure the software is free for all its users.  This
+General Public License applies to most of the Free Software
+Foundation's software and to any other program whose authors commit to
+using it.  (Some other Free Software Foundation software is covered by
+the GNU Lesser General Public License instead.)  You can apply it to
+your programs, too.
+
+  When we speak of free software, we are referring to freedom, not
+price.  Our General Public Licenses are designed to make sure that you
+have the freedom to distribute copies of free software (and charge for
+this service if you wish), that you receive source code or can get it
+if you want it, that you can change the software or use pieces of it
+in new free programs; and that you know you can do these things.
+
+  To protect your rights, we need to make restrictions that forbid
+anyone to deny you these rights or to ask you to surrender the rights.
+These restrictions translate to certain responsibilities for you if you
+distribute copies of the software, or if you modify it.
+
+  For example, if you distribute copies of such a program, whether
+gratis or for a fee, you must give the recipients all the rights that
+you have.  You must make sure that they, too, receive or can get the
+source code.  And you must show them these terms so they know their
+rights.
+
+  We protect your rights with two steps: (1) copyright the software, and
+(2) offer you this license which gives you legal permission to copy,
+distribute and/or modify the software.
+
+  Also, for each author's protection and ours, we want to make certain
+that everyone understands that there is no warranty for this free
+software.  If the software is modified by someone else and passed on, we
+want its recipients to know that what they have is not the original, so
+that any problems introduced by others will not reflect on the original
+authors' reputations.
+
+  Finally, any free program is threatened constantly by software
+patents.  We wish to avoid the danger that redistributors of a free
+program will individually obtain patent licenses, in effect making the
+program proprietary.  To prevent this, we have made it clear that any
+patent must be licensed for everyone's free use or not licensed at all.
+
+  The precise terms and conditions for copying, distribution and
+modification follow.
+
+		    GNU GENERAL PUBLIC LICENSE
+   TERMS AND CONDITIONS FOR COPYING, DISTRIBUTION AND MODIFICATION
+
+  0. This License applies to any program or other work which contains
+a notice placed by the copyright holder saying it may be distributed
+under the terms of this General Public License.  The "Program", below,
+refers to any such program or work, and a "work based on the Program"
+means either the Program or any derivative work under copyright law:
+that is to say, a work containing the Program or a portion of it,
+either verbatim or with modifications and/or translated into another
+language.  (Hereinafter, translation is included without limitation in
+the term "modification".)  Each licensee is addressed as "you".
+
+Activities other than copying, distribution and modification are not
+covered by this License; they are outside its scope.  The act of
+running the Program is not restricted, and the output from the Program
+is covered only if its contents constitute a work based on the
+Program (independent of having been made by running the Program).
+Whether that is true depends on what the Program does.
+
+  1. You may copy and distribute verbatim copies of the Program's
+source code as you receive it, in any medium, provided that you
+conspicuously and appropriately publish on each copy an appropriate
+copyright notice and disclaimer of warranty; keep intact all the
+notices that refer to this License and to the absence of any warranty;
+and give any other recipients of the Program a copy of this License
+along with the Program.
+
+You may charge a fee for the physical act of transferring a copy, and
+you may at your option offer warranty protection in exchange for a fee.
+
+  2. You may modify your copy or copies of the Program or any portion
+of it, thus forming a work based on the Program, and copy and
+distribute such modifications or work under the terms of Section 1
+above, provided that you also meet all of these conditions:
+
+    a) You must cause the modified files to carry prominent notices
+    stating that you changed the files and the date of any change.
+
+    b) You must cause any work that you distribute or publish, that in
+    whole or in part contains or is derived from the Program or any
+    part thereof, to be licensed as a whole at no charge to all third
+    parties under the terms of this License.
+
+    c) If the modified program normally reads commands interactively
+    when run, you must cause it, when started running for such
+    interactive use in the most ordinary way, to print or display an
+    announcement including an appropriate copyright notice and a
+    notice that there is no warranty (or else, saying that you provide
+    a warranty) and that users may redistribute the program under
+    these conditions, and telling the user how to view a copy of this
+    License.  (Exception: if the Program itself is interactive but
+    does not normally print such an announcement, your work based on
+    the Program is not required to print an announcement.)
+
+These requirements apply to the modified work as a whole.  If
+identifiable sections of that work are not derived from the Program,
+and can be reasonably considered independent and separate works in
+themselves, then this License, and its terms, do not apply to those
+sections when you distribute them as separate works.  But when you
+distribute the same sections as part of a whole which is a work based
+on the Program, the distribution of the whole must be on the terms of
+this License, whose permissions for other licensees extend to the
+entire whole, and thus to each and every part regardless of who wrote it.
+
+Thus, it is not the intent of this section to claim rights or contest
+your rights to work written entirely by you; rather, the intent is to
+exercise the right to control the distribution of derivative or
+collective works based on the Program.
+
+In addition, mere aggregation of another work not based on the Program
+with the Program (or with a work based on the Program) on a volume of
+a storage or distribution medium does not bring the other work under
+the scope of this License.
+
+  3. You may copy and distribute the Program (or a work based on it,
+under Section 2) in object code or executable form under the terms of
+Sections 1 and 2 above provided that you also do one of the following:
+
+    a) Accompany it with the complete corresponding machine-readable
+    source code, which must be distributed under the terms of Sections
+    1 and 2 above on a medium customarily used for software interchange; or,
+
+    b) Accompany it with a written offer, valid for at least three
+    years, to give any third party, for a charge no more than your
+    cost of physically performing source distribution, a complete
+    machine-readable copy of the corresponding source code, to be
+    distributed under the terms of Sections 1 and 2 above on a medium
+    customarily used for software interchange; or,
+
+    c) Accompany it with the information you received as to the offer
+    to distribute corresponding source code.  (This alternative is
+    allowed only for noncommercial distribution and only if you
+    received the program in object code or executable form with such
+    an offer, in accord with Subsection b above.)
+
+The source code for a work means the preferred form of the work for
+making modifications to it.  For an executable work, complete source
+code means all the source code for all modules it contains, plus any
+associated interface definition files, plus the scripts used to
+control compilation and installation of the executable.  However, as a
+special exception, the source code distributed need not include
+anything that is normally distributed (in either source or binary
+form) with the major components (compiler, kernel, and so on) of the
+operating system on which the executable runs, unless that component
+itself accompanies the executable.
+
+If distribution of executable or object code is made by offering
+access to copy from a designated place, then offering equivalent
+access to copy the source code from the same place counts as
+distribution of the source code, even though third parties are not
+compelled to copy the source along with the object code.
+
+  4. You may not copy, modify, sublicense, or distribute the Program
+except as expressly provided under this License.  Any attempt
+otherwise to copy, modify, sublicense or distribute the Program is
+void, and will automatically terminate your rights under this License.
+However, parties who have received copies, or rights, from you under
+this License will not have their licenses terminated so long as such
+parties remain in full compliance.
+
+  5. You are not required to accept this License, since you have not
+signed it.  However, nothing else grants you permission to modify or
+distribute the Program or its derivative works.  These actions are
+prohibited by law if you do not accept this License.  Therefore, by
+modifying or distributing the Program (or any work based on the
+Program), you indicate your acceptance of this License to do so, and
+all its terms and conditions for copying, distributing or modifying
+the Program or works based on it.
+
+  6. Each time you redistribute the Program (or any work based on the
+Program), the recipient automatically receives a license from the
+original licensor to copy, distribute or modify the Program subject to
+these terms and conditions.  You may not impose any further
+restrictions on the recipients' exercise of the rights granted herein.
+You are not responsible for enforcing compliance by third parties to
+this License.
+
+  7. If, as a consequence of a court judgment or allegation of patent
+infringement or for any other reason (not limited to patent issues),
+conditions are imposed on you (whether by court order, agreement or
+otherwise) that contradict the conditions of this License, they do not
+excuse you from the conditions of this License.  If you cannot
+distribute so as to satisfy simultaneously your obligations under this
+License and any other pertinent obligations, then as a consequence you
+may not distribute the Program at all.  For example, if a patent
+license would not permit royalty-free redistribution of the Program by
+all those who receive copies directly or indirectly through you, then
+the only way you could satisfy both it and this License would be to
+refrain entirely from distribution of the Program.
+
+If any portion of this section is held invalid or unenforceable under
+any particular circumstance, the balance of the section is intended to
+apply and the section as a whole is intended to apply in other
+circumstances.
+
+It is not the purpose of this section to induce you to infringe any
+patents or other property right claims or to contest validity of any
+such claims; this section has the sole purpose of protecting the
+integrity of the free software distribution system, which is
+implemented by public license practices.  Many people have made
+generous contributions to the wide range of software distributed
+through that system in reliance on consistent application of that
+system; it is up to the author/donor to decide if he or she is willing
+to distribute software through any other system and a licensee cannot
+impose that choice.
+
+This section is intended to make thoroughly clear what is believed to
+be a consequence of the rest of this License.
+
+  8. If the distribution and/or use of the Program is restricted in
+certain countries either by patents or by copyrighted interfaces, the
+original copyright holder who places the Program under this License
+may add an explicit geographical distribution limitation excluding
+those countries, so that distribution is permitted only in or among
+countries not thus excluded.  In such case, this License incorporates
+the limitation as if written in the body of this License.
+
+  9. The Free Software Foundation may publish revised and/or new versions
+of the General Public License from time to time.  Such new versions will
+be similar in spirit to the present version, but may differ in detail to
+address new problems or concerns.
+
+Each version is given a distinguishing version number.  If the Program
+specifies a version number of this License which applies to it and "any
+later version", you have the option of following the terms and conditions
+either of that version or of any later version published by the Free
+Software Foundation.  If the Program does not specify a version number of
+this License, you may choose any version ever published by the Free Software
+Foundation.
+
+  10. If you wish to incorporate parts of the Program into other free
+programs whose distribution conditions are different, write to the author
+to ask for permission.  For software which is copyrighted by the Free
+Software Foundation, write to the Free Software Foundation; we sometimes
+make exceptions for this.  Our decision will be guided by the two goals
+of preserving the free status of all derivatives of our free software and
+of promoting the sharing and reuse of software generally.
+
+			    NO WARRANTY
+
+  11. BECAUSE THE PROGRAM IS LICENSED FREE OF CHARGE, THERE IS NO WARRANTY
+FOR THE PROGRAM, TO THE EXTENT PERMITTED BY APPLICABLE LAW.  EXCEPT WHEN
+OTHERWISE STATED IN WRITING THE COPYRIGHT HOLDERS AND/OR OTHER PARTIES
+PROVIDE THE PROGRAM "AS IS" WITHOUT WARRANTY OF ANY KIND, EITHER EXPRESSED
+OR IMPLIED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
+MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE.  THE ENTIRE RISK AS
+TO THE QUALITY AND PERFORMANCE OF THE PROGRAM IS WITH YOU.  SHOULD THE
+PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF ALL NECESSARY SERVICING,
+REPAIR OR CORRECTION.
+
+  12. IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING
+WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MAY MODIFY AND/OR
+REDISTRIBUTE THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES,
+INCLUDING ANY GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING
+OUT OF THE USE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED
+TO LOSS OF DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY
+YOU OR THIRD PARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER
+PROGRAMS), EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE
+POSSIBILITY OF SUCH DAMAGES.
+
+		     END OF TERMS AND CONDITIONS
+
+	    How to Apply These Terms to Your New Programs
+
+  If you develop a new program, and you want it to be of the greatest
+possible use to the public, the best way to achieve this is to make it
+free software which everyone can redistribute and change under these terms.
+
+  To do so, attach the following notices to the program.  It is safest
+to attach them to the start of each source file to most effectively
+convey the exclusion of warranty; and each file should have at least
+the "copyright" line and a pointer to where the full notice is found.
+
+    <one line to give the program's name and a brief idea of what it does.>
+    Copyright (C) <year>  <name of author>
+
+    This program is free software; you can redistribute it and/or modify
+    it under the terms of the GNU General Public License as published by
+    the Free Software Foundation; either version 2 of the License, or
+    (at your option) any later version.
+
+    This program is distributed in the hope that it will be useful,
+    but WITHOUT ANY WARRANTY; without even the implied warranty of
+    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+    GNU General Public License for more details.
+
+    You should have received a copy of the GNU General Public License along
+    with this program; if not, write to the Free Software Foundation, Inc.,
+    51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+
+Also add information on how to contact you by electronic and paper mail.
+
+If the program is interactive, make it output a short notice like this
+when it starts in an interactive mode:
+
+    Gnomovision version 69, Copyright (C) year name of author
+    Gnomovision comes with ABSOLUTELY NO WARRANTY; for details type `show w'.
+    This is free software, and you are welcome to redistribute it
+    under certain conditions; type `show c' for details.
+
+The hypothetical commands `show w' and `show c' should show the appropriate
+parts of the General Public License.  Of course, the commands you use may
+be called something other than `show w' and `show c'; they could even be
+mouse-clicks or menu items--whatever suits your program.
+
+You should also get your employer (if you work as a programmer) or your
+school, if any, to sign a "copyright disclaimer" for the program, if
+necessary.  Here is a sample; alter the names:
+
+  Yoyodyne, Inc., hereby disclaims all copyright interest in the program
+  `Gnomovision' (which makes passes at compilers) written by James Hacker.
+
+  <signature of Ty Coon>, 1 April 1989
+  Ty Coon, President of Vice
+
+This General Public License does not permit incorporating your program into
+proprietary programs.  If your program is a subroutine library, you may
+consider it more useful to permit linking proprietary applications with the
+library.  If this is what you want to do, use the GNU Lesser General
+Public License instead of this License.
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/hgext3rd/fastimport/vendor/python_fastimport/NEWS	Tue Jan 19 22:56:34 2021 +0000
@@ -0,0 +1,62 @@
+0.9.8	2018-04-15
+
+ * Fix version number. (Jelmer Vernooij)
+
+0.9.7	2018-04-15
+
+ * Don't attempt to encode bytestrings in utf8_bytes_helper().
+   (Jelmer Vernooij, #1647101)
+
+ * Add fast-import-filter, fast-import-query and fast-import-info
+   script. (Jelmer Vernooij)
+
+0.9.6	2016-04-19
+
+ * Add python3.4 support (Jelmer Vernooij)
+
+0.9.5	2016-04-18
+
+ * Add python3.5 support. (Félix Mattrat)
+
+0.9.4	2014-07-04
+
+ * Get handlers from class object using getattr() for possible inheritance
+   (Cécile Tonglet)
+
+ * Fix 'check-pypy' by removing use of nonexistent target. (masklinn)
+
+ * Use namedtuple for authorship tuple in Commit.{author,committer}.
+   (masklinn)
+
+0.9.3	2014-03-01
+
+ * Remove unused and untested helper single_plural,
+   invert_dict, invert_dictset, defines_to_dict and
+   binary_stream.
+   (Jelmer Vernooij)
+
+ * Install NEWS and README files.
+
+0.9.2	2012-04-03
+
+ * Remove reftracker and idmapfile, which are bzr-specific.
+   (Jelmer Vernooij, #693507)
+
+ * Cope with invalid timezones like +61800 a little bit better.
+   (Jelmer Vernooij, #959154)
+
+ * Allow non-strict parsing of fastimport streams, when
+   a tagger is missing an email address.
+   (Jelmer Vernooij, #730607)
+
+0.9.1	2012-02-28
+
+ * Update FSF address in headers. (Dan Callaghan, #868800)
+
+ * Support 'done' feature. (Jelmer Vernooij, #942563)
+
+ * Rename tarball for the benefit of pip. (Jelmer Vernooij, #779690)
+
+0.9.0	2011-01-30
+
+ Initial release.
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/hgext3rd/fastimport/vendor/python_fastimport/README.md	Tue Jan 19 22:56:34 2021 +0000
@@ -0,0 +1,7 @@
+[![Build Status](https://travis-ci.org/jelmer/python-fastimport.png?branch=master)](https://travis-ci.org/jelmer/python-fastimport)
+
+python-fastimport
+=================
+
+This package provides a parser for and generator of the Git fastimport format.
+(https://www.kernel.org/pub/software/scm/git/docs/git-fast-import.html)
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/hgext3rd/fastimport/vendor/python_fastimport/__init__.py	Tue Jan 19 22:56:34 2021 +0000
@@ -0,0 +1,33 @@
+# Copyright (C) 2008-2011 Canonical Ltd
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program.  If not, see <http://www.gnu.org/licenses/>.
+
+"""Fastimport file format parser and generator
+
+This is a Python parser for git's fast-import format.  It was
+originally developed for bzr-fastimport but has been extracted so
+it can be used by other projects.  Use it like so:
+
+   import fastimport.processor
+   import fastimport.parser
+
+   class ImportProcessor(fastimport.processor.ImportProcessor):
+       ...
+
+   parser = fastimport.parser.ImportParser(sys.stdin)
+   processor = ImportProcessor(...)
+   processor.process(parser.parse())
+"""
+
+__version__ = (0, 9, 8)
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/hgext3rd/fastimport/vendor/python_fastimport/commands.py	Tue Jan 19 22:56:34 2021 +0000
@@ -0,0 +1,530 @@
+# Copyright (C) 2008 Canonical Ltd
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program.  If not, see <http://www.gnu.org/licenses/>.
+
+"""fast-import command classes.
+
+These objects are used by the parser to represent the content of
+a fast-import stream.
+"""
+from __future__ import division
+
+import re
+import stat
+import sys
+
+from fastimport.helpers import (
+    newobject as object,
+    utf8_bytes_string,
+    repr_bytes,
+    )
+
+
+# There is a bug in git 1.5.4.3 and older by which unquoting a string consumes
+# one extra character. Set this variable to True to work-around it. It only
+# happens when renaming a file whose name contains spaces and/or quotes, and
+# the symptom is:
+#   % git-fast-import
+#   fatal: Missing space after source: R "file 1.txt" file 2.txt
+# http://git.kernel.org/?p=git/git.git;a=commit;h=c8744d6a8b27115503565041566d97c21e722584
+GIT_FAST_IMPORT_NEEDS_EXTRA_SPACE_AFTER_QUOTE = False
+
+
+# Lists of command names
+COMMAND_NAMES = [b'blob', b'checkpoint', b'commit', b'feature', b'progress',
+    b'reset', b'tag']
+FILE_COMMAND_NAMES = [b'filemodify', b'filedelete', b'filecopy', b'filerename',
+    b'filedeleteall']
+
+# Feature names
+MULTIPLE_AUTHORS_FEATURE = b'multiple-authors'
+COMMIT_PROPERTIES_FEATURE = b'commit-properties'
+EMPTY_DIRS_FEATURE = b'empty-directories'
+FEATURE_NAMES = [
+    MULTIPLE_AUTHORS_FEATURE,
+    COMMIT_PROPERTIES_FEATURE,
+    EMPTY_DIRS_FEATURE,
+    ]
+
+
+class ImportCommand(object):
+    """Base class for import commands."""
+
+    def __init__(self, name):
+        self.name = name
+        # List of field names not to display
+        self._binary = []
+
+    def __str__(self):
+        return repr(self)
+
+    def __repr__(self):
+        if sys.version_info[0] == 2:
+            return self.__bytes__()
+        else:
+            return bytes(self).decode('utf8')
+
+    def __bytes__(self):
+        raise NotImplementedError(
+            'An implementation of __bytes__ is required'
+        )
+
+    def dump_str(self, names=None, child_lists=None, verbose=False):
+        """Dump fields as a string.
+
+        For debugging.
+
+        :param names: the list of fields to include or
+            None for all public fields
+        :param child_lists: dictionary of child command names to
+            fields for that child command to include
+        :param verbose: if True, prefix each line with the command class and
+            display fields as a dictionary; if False, dump just the field
+            values with tabs between them
+        """
+        interesting = {}
+        if names is None:
+            fields = [
+                k for k in list(self.__dict__.keys())
+                if not k.startswith(b'_')
+            ]
+        else:
+            fields = names
+        for field in fields:
+            value = self.__dict__.get(field)
+            if field in self._binary and value is not None:
+                value = b'(...)'
+            interesting[field] = value
+        if verbose:
+            return "%s: %s" % (self.__class__.__name__, interesting)
+        else:
+            return "\t".join([repr(interesting[k]) for k in fields])
+
+
+class BlobCommand(ImportCommand):
+
+    def __init__(self, mark, data, lineno=0):
+        ImportCommand.__init__(self, b'blob')
+        self.mark = mark
+        self.data = data
+        self.lineno = lineno
+        # Provide a unique id in case the mark is missing
+        if mark is None:
+            self.id = b'@' + ("%d" % lineno).encode('utf-8')
+        else:
+            self.id = b':' + mark
+        self._binary = [b'data']
+
+    def __bytes__(self):
+        if self.mark is None:
+            mark_line = b''
+        else:
+            mark_line = b"\nmark :" + self.mark
+        return (b'blob' + mark_line + b'\n' +
+                ('data %d\n' % len(self.data)).encode('utf-8') + self.data)
+
+
+class CheckpointCommand(ImportCommand):
+
+    def __init__(self):
+        ImportCommand.__init__(self, b'checkpoint')
+
+    def __bytes__(self):
+        return b'checkpoint'
+
+
+class CommitCommand(ImportCommand):
+
+    def __init__(self, ref, mark, author, committer, message, from_,
+        merges, file_iter, lineno=0, more_authors=None, properties=None):
+        ImportCommand.__init__(self, b'commit')
+        self.ref = ref
+        self.mark = mark
+        self.author = author
+        self.committer = committer
+        self.message = message
+        self.from_ = from_
+        self.merges = merges
+        self.file_iter = file_iter
+        self.more_authors = more_authors
+        self.properties = properties
+        self.lineno = lineno
+        self._binary = [b'file_iter']
+        # Provide a unique id in case the mark is missing
+        if self.mark is None:
+            self.id = b'@' + ('%d' % lineno).encode('utf-8')
+        else:
+            if isinstance(self.mark, (int)):
+                self.id = b':' + str(self.mark).encode('utf-8')
+            else:
+                self.id = b':' + self.mark
+
+    def copy(self, **kwargs):
+        if not isinstance(self.file_iter, list):
+            self.file_iter = list(self.file_iter)
+
+        fields = dict(
+            (key, value)
+            for key, value in self.__dict__.items()
+            if key not in ('id', 'name')
+            if not key.startswith('_')
+        )
+
+        fields.update(kwargs)
+
+        return CommitCommand(**fields)
+
+    def __bytes__(self):
+        return self.to_string(include_file_contents=True)
+
+
+    def to_string(self, use_features=True, include_file_contents=False):
+        """
+            @todo the name to_string is ambiguous since the method actually
+                returns bytes.
+        """
+        if self.mark is None:
+            mark_line = b''
+        else:
+            if isinstance(self.mark, (int)):
+                mark_line = b'\nmark :' + str(self.mark).encode('utf-8')
+            else:
+                mark_line = b'\nmark :' + self.mark
+
+        if self.author is None:
+            author_section = b''
+        else:
+            author_section = b'\nauthor ' + format_who_when(self.author)
+            if use_features and self.more_authors:
+                for author in self.more_authors:
+                    author_section += b'\nauthor ' + format_who_when(author)
+
+        committer = b'committer ' + format_who_when(self.committer)
+
+        if self.message is None:
+            msg_section = b''
+        else:
+            msg = self.message
+            msg_section = ('\ndata %d\n' % len(msg)).encode('ascii') + msg
+        if self.from_ is None:
+            from_line = b''
+        else:
+            from_line = b'\nfrom ' + self.from_
+        if self.merges is None:
+            merge_lines = b''
+        else:
+            merge_lines = b''.join([b'\nmerge ' + m
+                for m in self.merges])
+        if use_features and self.properties:
+            property_lines = []
+            for name in sorted(self.properties):
+                value = self.properties[name]
+                property_lines.append(b'\n' + format_property(name, value))
+            properties_section = b''.join(property_lines)
+        else:
+            properties_section = b''
+        if self.file_iter is None:
+            filecommands = b''
+        else:
+            if include_file_contents:
+                filecommands = b''.join([b'\n' + repr_bytes(c)
+                    for c in self.iter_files()])
+            else:
+                filecommands = b''.join([b'\n' + str(c)
+                    for c in self.iter_files()])
+        return b''.join([
+            b'commit ',
+            self.ref,
+            mark_line,
+            author_section + b'\n',
+            committer,
+            msg_section,
+            from_line,
+            merge_lines,
+            properties_section,
+            filecommands])
+
+    def dump_str(self, names=None, child_lists=None, verbose=False):
+        result = [ImportCommand.dump_str(self, names, verbose=verbose)]
+        for f in self.iter_files():
+            if child_lists is None:
+                continue
+            try:
+                child_names = child_lists[f.name]
+            except KeyError:
+                continue
+            result.append('\t%s' % f.dump_str(child_names, verbose=verbose))
+        return '\n'.join(result)
+
+    def iter_files(self):
+        """Iterate over files."""
+        # file_iter may be a callable or an iterator
+        if callable(self.file_iter):
+            return self.file_iter()
+        return iter(self.file_iter)
+
+
class FeatureCommand(ImportCommand):
    """A 'feature' stream command, optionally carrying a value."""

    def __init__(self, feature_name, value=None, lineno=0):
        ImportCommand.__init__(self, b'feature')
        self.feature_name = feature_name
        self.value = value
        self.lineno = lineno

    def __bytes__(self):
        suffix = b'' if self.value is None else b'=' + self.value
        return b'feature ' + self.feature_name + suffix
+
+
class ProgressCommand(ImportCommand):
    """A 'progress' stream command carrying a free-form message."""

    def __init__(self, message):
        ImportCommand.__init__(self, b'progress')
        self.message = message

    def __bytes__(self):
        return b' '.join((b'progress', self.message))
+
+
class ResetCommand(ImportCommand):
    """A 'reset' command, optionally followed by a 'from' clause."""

    def __init__(self, ref, from_):
        ImportCommand.__init__(self, b'reset')
        self.ref = ref
        self.from_ = from_

    def __bytes__(self):
        result = b'reset ' + self.ref
        if self.from_ is not None:
            # git-fast-import(1) says the trailing LF is optional, but git
            # versions up to 1.5.4.3 had a bug that required it.  Always emit
            # it: it doesn't hurt and keeps compatibility with old versions.
            # http://git.kernel.org/?p=git/git.git;a=commit;h=655e8515f279c01f525745d443f509f97cd805ab
            result += b'\nfrom ' + self.from_ + b'\n'
        return result
+
+
class TagCommand(ImportCommand):
    """A 'tag' command with optional from, tagger and message sections."""

    def __init__(self, id, from_, tagger, message):
        ImportCommand.__init__(self, b'tag')
        self.id = id
        self.from_ = from_
        self.tagger = tagger
        self.message = message

    def __bytes__(self):
        parts = [b'tag ' + self.id]
        if self.from_ is not None:
            parts.append(b'\nfrom ' + self.from_)
        if self.tagger is not None:
            parts.append(b'\ntagger ' + format_who_when(self.tagger))
        if self.message is not None:
            msg = self.message
            parts.append(('\ndata %d\n' % len(msg)).encode('ascii') + msg)
        return b''.join(parts)
+
+
class FileCommand(ImportCommand):
    """Base class for file commands (M, D, C, R, deleteall, N)."""
    pass
+
+
class FileModifyCommand(FileCommand):
    """An 'M' (filemodify) command, with inline data or a dataref."""

    def __init__(self, path, mode, dataref, data):
        # Exactly one of dataref / data is expected to be None.
        FileCommand.__init__(self, b'filemodify')
        self.path = check_path(path)
        self.mode = mode
        self.dataref = dataref
        self.data = data
        self._binary = [b'data']

    def __bytes__(self):
        return self.to_string(include_file_contents=True)

    def __str__(self):
        # NOTE(review): to_string returns bytes, so on Python 3 calling
        # str() on this command raises TypeError; kept as-is to preserve
        # the original behaviour.
        return self.to_string(include_file_contents=False)

    def _format_mode(self, mode):
        # Map the numeric mode onto the textual form git expects.
        known_modes = {
            0o755: b'755', 0o100755: b'755',
            0o644: b'644', 0o100644: b'644',
            0o40000: b'040000',
            0o120000: b'120000',
            0o160000: b'160000',
        }
        try:
            return known_modes[mode]
        except KeyError:
            raise AssertionError('Unknown mode %o' % mode)

    def to_string(self, include_file_contents=False):
        """Serialise as bytes, optionally with the inline file data."""
        datastr = b''
        if stat.S_ISDIR(self.mode):
            dataref = b'-'
        elif self.dataref is None:
            dataref = b'inline'
            if include_file_contents:
                datastr = ('\ndata %d\n' % len(self.data)).encode('ascii') + self.data
        else:
            dataref = self.dataref
        path = format_path(self.path)
        return b' '.join(
            [b'M', self._format_mode(self.mode), dataref, path + datastr])
+
+
class FileDeleteCommand(FileCommand):
    """A 'D' (filedelete) command."""

    def __init__(self, path):
        FileCommand.__init__(self, b'filedelete')
        self.path = check_path(path)

    def __bytes__(self):
        return b'D ' + format_path(self.path)
+
+
class FileCopyCommand(FileCommand):
    """A 'C' (filecopy) command."""

    def __init__(self, src_path, dest_path):
        FileCommand.__init__(self, b'filecopy')
        self.src_path = check_path(src_path)
        self.dest_path = check_path(dest_path)

    def __bytes__(self):
        # Quote spaces in the source path so the two paths stay separable.
        src = format_path(self.src_path, quote_spaces=True)
        dest = format_path(self.dest_path)
        return b'C ' + src + b' ' + dest
+
+
class FileRenameCommand(FileCommand):
    """An 'R' (filerename) command."""

    def __init__(self, old_path, new_path):
        FileCommand.__init__(self, b'filerename')
        self.old_path = check_path(old_path)
        self.new_path = check_path(new_path)

    def __bytes__(self):
        # Quote spaces in the old path so the two paths stay separable.
        old = format_path(self.old_path, quote_spaces=True)
        new = format_path(self.new_path)
        return b'R ' + old + b' ' + new
+
+
class FileDeleteAllCommand(FileCommand):
    """A 'deleteall' command, clearing the branch's entire tree."""

    def __init__(self):
        FileCommand.__init__(self, b'filedeleteall')

    def __bytes__(self):
        return b'deleteall'
+
+
class NoteModifyCommand(FileCommand):
    """An 'N' (notemodify) command with inline note content."""

    def __init__(self, from_, data):
        super(NoteModifyCommand, self).__init__(b'notemodify')
        self.from_ = from_
        self.data = data
        self._binary = ['data']

    def __bytes__(self):
        header = b'N inline :' + self.from_
        payload = ('\ndata %d\n' % len(self.data)).encode('ascii') + self.data
        return header + payload
+
+
def check_path(path):
    """Check that a path is legal.

    :return: the path if all is OK
    :raise ValueError: if the path is illegal
    :raise TypeError: if the path is not a byte string (str on Python 2)
    """
    if path is None or path == b'' or path.startswith(b'/'):
        raise ValueError("illegal path '%s'" % path)

    # BUG FIX: the original joined these two version guards with 'and';
    # since at most one guard can be true on any interpreter, the TypeError
    # below was unreachable.  Also fixed the 'illegale' typo in the message.
    if (
        (sys.version_info[0] >= 3 and not isinstance(path, bytes)) or
        (sys.version_info[0] == 2 and not isinstance(path, str))
    ):
        raise TypeError("illegal type for path '%r'" % path)

    return path
+
+
def format_path(p, quote_spaces=False):
    """Format a path in utf8, quoting it if necessary."""
    if b'\n' in p:
        # BUG FIX: escape newlines as the two characters backslash + 'n'.
        # The original used re.sub(b'\n', b'\\n', p); re.sub decodes the
        # replacement escape b'\\n' back to a literal LF, making the
        # substitution a no-op.
        p = p.replace(b'\n', b'\\n')
        quote = True
    else:
        # BUG FIX: slice (p[0:1]) rather than index (p[0]).  On Python 3
        # indexing bytes yields an int, so the comparison with b'"' was
        # always False and leading-quote paths were never quoted.
        quote = p[0:1] == b'"' or (quote_spaces and b' ' in p)
    if quote:
        extra = GIT_FAST_IMPORT_NEEDS_EXTRA_SPACE_AFTER_QUOTE and b' ' or b''
        p = b'"' + p + b'"' + extra
    return p
+
+
def format_who_when(fields):
    """Format a (name, email, secs-since-epoch, utc-offset-secs) tuple as bytes."""
    offset = fields[3]
    if offset < 0:
        offset_sign = b'-'
        offset = -offset
    else:
        offset_sign = b'+'
    hours, remainder = divmod(offset, 3600)
    minutes = remainder // 60
    offset_str = offset_sign + ('%02d%02d' % (hours, minutes)).encode('ascii')

    # The separator is decided on the raw name, before utf8 conversion.
    name = fields[0]
    sep = b' ' if name != b'' else b''
    name = utf8_bytes_string(name)
    email = utf8_bytes_string(fields[1])
    when = ('%d' % fields[2]).encode('ascii')

    return name + sep + b'<' + email + b'> ' + when + b' ' + offset_str
+
+
def format_property(name, value):
    """Format the name and value (both unicode) of a property as bytes."""
    result = b'property ' + utf8_bytes_string(name)
    if value is not None:
        utf8_value = utf8_bytes_string(value)
        length = ('%d' % len(utf8_value)).encode('ascii')
        result += b' ' + length + b' ' + utf8_value
    return result
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/hgext3rd/fastimport/vendor/python_fastimport/dates.py	Tue Jan 19 22:56:34 2021 +0000
@@ -0,0 +1,86 @@
+# Copyright (C) 2008 Canonical Ltd
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program.  If not, see <http://www.gnu.org/licenses/>.
+
+"""Date parsing routines.
+
+Each routine represents a date format that can be specified in a
+stream using the date-format feature.  The return value is
+timestamp,timezone where
+
+* timestamp is seconds since epoch
+* timezone is the offset from UTC in seconds.
+"""
+import time
+
+from fastimport import errors
+
+
def parse_raw(s, lineno=0):
    """Parse a date from a raw string.

    The format must be exactly "seconds-since-epoch offset-utc".
    See the spec for details.

    :return: (timestamp, timezone-offset-in-seconds)
    :raise errors.InvalidTimezone: if the offset part cannot be parsed
    """
    timestamp_str, timezone_str = s.split(b' ', 1)
    timestamp = float(timestamp_str)
    try:
        timezone = parse_tz(timezone_str)
    except ValueError:
        raise errors.InvalidTimezone(lineno, timezone_str)
    return timestamp, timezone
+
+
def parse_tz(tz):
    """Parse a timezone specification in the [+|-]HHMM format.

    :return: the timezone offset in seconds.
    :raise ValueError: if the sign marker is missing.
    """
    # Slice rather than index: on Python 3, tz[0] would be an int while
    # tz[0:1] is a one-byte bytes string.
    sign_byte = tz[0:1]
    if sign_byte == b'+':
        sign = 1
    elif sign_byte == b'-':
        sign = -1
    else:
        raise ValueError(tz)
    hours = int(tz[1:-2])
    minutes = int(tz[-2:])
    return sign * (hours * 3600 + minutes * 60)
+
+
def parse_rfc2822(s, lineno=0):
    """Parse a date from a rfc2822 string.

    See the spec for details.
    """
    # Not implemented; passing the function object itself makes the
    # NotImplementedError message identify which parser is missing.
    raise NotImplementedError(parse_rfc2822)
+
+
def parse_now(s, lineno=0):
    """Parse a date from a string.

    The format must be exactly "now".
    See the spec for details.

    :return: (current-time, 0) — i.e. now, expressed as UTC.
    """
    return time.time(), 0
+
+
# Lookup table of date parsing routines, keyed by the name used in the
# stream's date-format feature.
DATE_PARSERS_BY_NAME = {
    u'raw':      parse_raw,
    u'rfc2822':  parse_rfc2822,
    u'now':      parse_now,
    }
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/hgext3rd/fastimport/vendor/python_fastimport/errors.py	Tue Jan 19 22:56:34 2021 +0000
@@ -0,0 +1,183 @@
+# Copyright (C) 2008 Canonical Ltd
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program.  If not, see <http://www.gnu.org/licenses/>.
+
+"""Exception classes for fastimport"""
+
+# Prefix to messages to show location information
+_LOCATION_FMT = "line %(lineno)d: "
+
+# ImportError is heavily based on BzrError
+
class ImportError(Exception):
    """The base exception class for all import processing exceptions.

    Note: this deliberately shadows the builtin ImportError inside this
    module.  Subclasses set their attributes before calling __init__ so
    the class-level _fmt template can interpolate them.
    """

    def __init__(self):
        message = self._fmt % self.__dict__
        super(ImportError, self).__init__(message)
+
+
class ParsingError(ImportError):
    """Base class for errors detected while parsing the import stream."""

    _fmt = _LOCATION_FMT + "Unknown Import Parsing Error"

    def __init__(self, lineno):
        # Set lineno before the base __init__ interpolates _fmt.
        self.lineno = lineno
        super(ParsingError, self).__init__()
+
+
class MissingBytes(ParsingError):
    """Raised when EOF encountered while expecting to find more bytes."""

    _fmt = (_LOCATION_FMT + "Unexpected EOF - expected %(expected)d bytes,"
        " found %(found)d")

    def __init__(self, lineno, expected, found):
        # Attributes must be set before the base __init__ interpolates _fmt.
        self.expected = expected
        self.found = found
        super(MissingBytes, self).__init__(lineno)
+
+
class MissingTerminator(ParsingError):
    """Raised when EOF encountered while expecting to find a terminator."""

    _fmt = (_LOCATION_FMT +
        "Unexpected EOF - expected '%(terminator)s' terminator")

    def __init__(self, lineno, terminator):
        self.terminator = terminator
        super(MissingTerminator, self).__init__(lineno)
+
+
class InvalidCommand(ParsingError):
    """Raised when an unknown command is found in the stream."""

    _fmt = (_LOCATION_FMT + "Invalid command '%(cmd)s'")

    def __init__(self, lineno, cmd):
        self.cmd = cmd
        super(InvalidCommand, self).__init__(lineno)
+
+
class MissingSection(ParsingError):
    """Raised when a required section of a command is absent."""

    _fmt = (_LOCATION_FMT + "Command %(cmd)s is missing section %(section)s")

    def __init__(self, lineno, cmd, section):
        self.cmd = cmd
        self.section = section
        super(MissingSection, self).__init__(lineno)
+
+
class BadFormat(ParsingError):
    """Raised when a section of a command is formatted incorrectly."""

    _fmt = (_LOCATION_FMT + "Bad format for section %(section)s in "
        "command %(cmd)s: found '%(text)s'")

    def __init__(self, lineno, cmd, section, text):
        self.cmd = cmd
        self.section = section
        self.text = text
        super(BadFormat, self).__init__(lineno)
+
+
class InvalidTimezone(ParsingError):
    """Raised when a string timezone cannot be converted to an offset."""

    _fmt = (_LOCATION_FMT +
        "Timezone %(timezone)r could not be converted.%(reason)s")

    def __init__(self, lineno, timezone, reason=None):
        self.timezone = timezone
        # _fmt appends reason with no separator, so prefix a space here.
        self.reason = ' ' + reason if reason else ''
        super(InvalidTimezone, self).__init__(lineno)
+
+
class PrematureEndOfStream(ParsingError):
    """Raised when the stream ends although the 'done' feature was declared."""

    _fmt = (_LOCATION_FMT + "Stream end before 'done' command")

    def __init__(self, lineno):
        super(PrematureEndOfStream, self).__init__(lineno)
+
+
class UnknownDateFormat(ImportError):
    """Raised when an unknown date format is given."""

    _fmt = ("Unknown date format '%(format)s'")

    def __init__(self, format):
        self.format = format
        super(UnknownDateFormat, self).__init__()
+
+
class MissingHandler(ImportError):
    """Raised when a processor has no handler for a command."""

    _fmt = ("Missing handler for command %(cmd)s")

    def __init__(self, cmd):
        self.cmd = cmd
        super(MissingHandler, self).__init__()
+
+
class UnknownParameter(ImportError):
    """Raised when an unknown parameter is passed to a processor."""

    _fmt = ("Unknown parameter - '%(param)s' not in %(knowns)s")

    def __init__(self, param, knowns):
        self.param = param
        self.knowns = knowns
        super(UnknownParameter, self).__init__()
+
+
class BadRepositorySize(ImportError):
    """Raised when the repository has an unexpected number of revisions."""

    _fmt = ("Bad repository size - %(found)d revisions found, "
        "%(expected)d expected")

    def __init__(self, expected, found):
        self.expected = expected
        self.found = found
        super(BadRepositorySize, self).__init__()
+
+
class BadRestart(ImportError):
    """Raised when the import stream and id-map do not match up."""

    _fmt = ("Bad restart - attempted to skip commit %(commit_id)s "
        "but matching revision-id is unknown")

    def __init__(self, commit_id):
        self.commit_id = commit_id
        super(BadRestart, self).__init__()
+
+
class UnknownFeature(ImportError):
    """Raised when an unknown feature is given in the input stream."""

    _fmt = ("Unknown feature '%(feature)s' - try a later importer or "
        "an earlier data format")

    def __init__(self, feature):
        self.feature = feature
        super(UnknownFeature, self).__init__()
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/hgext3rd/fastimport/vendor/python_fastimport/helpers.py	Tue Jan 19 22:56:34 2021 +0000
@@ -0,0 +1,265 @@
+# Copyright (C) 2008 Canonical Ltd
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program.  If not, see <http://www.gnu.org/licenses/>.
+
+"""Miscellaneous useful stuff."""
+import sys
+
+
+def _common_path_and_rest(l1, l2, common=[]):
+    # From http://code.activestate.com/recipes/208993/
+    if len(l1) < 1: return (common, l1, l2)
+    if len(l2) < 1: return (common, l1, l2)
+    if l1[0] != l2[0]: return (common, l1, l2)
+    return _common_path_and_rest(
+        l1[1:],
+        l2[1:],
+        common + [
+            l1[0:1] # return a byte string in python 3 unlike l1[0] that
+                    # would return an integer.
+        ]
+    )
+
+
def common_path(path1, path2):
    """Find the common bit of 2 paths (their common leading bytes)."""
    limit = min(len(path1), len(path2))
    n = 0
    # Compare one-byte slices so this works identically on Python 2 and 3.
    while n < limit and path1[n:n + 1] == path2[n:n + 1]:
        n += 1
    return path1[:n]
+
+
def common_directory(paths):
    """Find the deepest common directory of a list of paths.

    :return: if no paths are provided, None is returned;
      if there is no common directory, '' is returned;
      otherwise the common directory with a trailing / is returned.
    """
    import posixpath

    def get_dir_with_slash(path):
        # An empty path or one already ending in '/' is used as-is.
        if path == b'' or path.endswith(b'/'):
            return path
        dirname = posixpath.split(path)[0]
        return dirname if dirname == b'' else dirname + b'/'

    if not paths:
        return None
    if len(paths) == 1:
        return get_dir_with_slash(paths[0])
    common = paths[0]
    for path in paths[1:]:
        common = common_path(common, path)
    return get_dir_with_slash(common)
+
+
def is_inside(directory, fname):
    """True if fname is inside directory.

    The parameters should typically be passed to osutils.normpath first, so
    that . and .. and repeated slashes are eliminated, and the separators
    are canonical for the platform.

    The empty string as a dir name is taken as top-of-tree and matches
    everything.
    """
    # XXX: Most callers of this can actually do something smarter by
    # looking at the inventory
    if directory == fname or directory == b'':
        return True
    prefix = directory if directory.endswith(b'/') else directory + b'/'
    return fname.startswith(prefix)
+
+
def is_inside_any(dir_list, fname):
    """True if fname is inside any of given dirs."""
    return any(is_inside(dirname, fname) for dirname in dir_list)
+
+
def utf8_bytes_string(s):
    """Convert a string to a bytes string (if necessary, encode in utf8)"""
    if sys.version_info[0] == 2:
        # Python 2: native str is already bytes; encode unicode text.
        return s if isinstance(s, str) else s.encode('utf8')
    # Python 3: encode str, pass bytes through untouched.
    return s.encode('utf8') if isinstance(s, str) else s
+
+
def repr_bytes(obj):
    """Return a bytes representation of the object (its __bytes__ on py3)."""
    if sys.version_info[0] >= 3:
        return bytes(obj)
    # Python 2: repr and the byte form are the same thing.
    return repr(obj)
+
+
class newobject(object):
    """
    A magical object class that provides Python 2 compatibility methods::
        next
        __unicode__
        __nonzero__

    Subclasses of this class can merely define the Python 3 methods (__next__,
    __str__, and __bool__).

    This is a copy/paste of the future.types.newobject class of the future
    package.
    """
    # NOTE(review): the shims below are only exercised on Python 2;
    # __unicode__ refers to the py2-only 'unicode' builtin, so it would
    # NameError if somehow invoked on Python 3.
    def next(self):
        # Delegate py2's iterator protocol to the py3 __next__ method.
        if hasattr(self, '__next__'):
            return type(self).__next__(self)
        raise TypeError('newobject is not an iterator')

    def __unicode__(self):
        # All subclasses of the builtin object should have __str__ defined.
        # Note that old-style classes do not have __str__ defined.
        if hasattr(self, '__str__'):
            s = type(self).__str__(self)
        else:
            s = str(self)
        if isinstance(s, unicode):
            return s
        else:
            return s.decode('utf-8')

    def __nonzero__(self):
        # Delegate py2 truthiness to the py3 __bool__ method.
        if hasattr(self, '__bool__'):
            return type(self).__bool__(self)
        # object has no __nonzero__ method
        return True

    # Are these ever needed?
    # def __div__(self):
    #     return self.__truediv__()

    # def __idiv__(self, other):
    #     return self.__itruediv__(other)

    def __long__(self):
        # py2 long() falls back to __int__ when available.
        if not hasattr(self, '__int__'):
            return NotImplemented
        return self.__int__()  # not type(self).__int__(self)

    # def __new__(cls, *args, **kwargs):
    #     """
    #     dict() -> new empty dictionary
    #     dict(mapping) -> new dictionary initialized from a mapping object's
    #         (key, value) pairs
    #     dict(iterable) -> new dictionary initialized as if via:
    #         d = {}
    #         for k, v in iterable:
    #             d[k] = v
    #     dict(**kwargs) -> new dictionary initialized with the name=value pairs
    #         in the keyword argument list.  For example:  dict(one=1, two=2)
    #     """

    #     if len(args) == 0:
    #         return super(newdict, cls).__new__(cls)
    #     elif type(args[0]) == newdict:
    #         return args[0]
    #     else:
    #         value = args[0]
    #     return super(newdict, cls).__new__(cls, value)

    def __native__(self):
        """
        Hook for the future.utils.native() function
        """
        return object(self)
+
+
def binary_stream(stream):
    """Put a stream into binary mode on Windows; a no-op elsewhere.

    :return: the stream
    """
    try:
        import os
        if os.name == 'nt':
            get_fileno = getattr(stream, 'fileno', None)
            if get_fileno:
                fd = get_fileno()
                # A fd of -1 means we're working as subprocess.
                if fd >= 0:
                    import msvcrt
                    msvcrt.setmode(fd, os.O_BINARY)
    except ImportError:
        # msvcrt unavailable: leave the stream untouched.
        pass
    return stream
+
+
def invert_dictset(d):
    """Invert a dictionary with keys matching a set of values, turned into lists."""
    # Based on recipe from ASPN
    result = {}
    for key, values in d.items():
        for value in values:
            result.setdefault(value, []).append(key)
    return result
+
+
def invert_dict(d):
    """Invert a dictionary with keys matching each value turned into a list."""
    # Based on recipe from ASPN
    result = {}
    for key, value in d.items():
        result.setdefault(value, []).append(key)
    return result
+
+
def defines_to_dict(defines):
    """Convert a list of "name=value" definition strings to a dictionary.

    A definition without '=' maps to the value 1; otherwise the name maps
    to the value, with surrounding whitespace stripped from both.
    Returns None when given None.
    """
    if defines is None:
        return None
    result = {}
    for define in defines:
        name, sep, value = define.partition('=')
        if sep:
            result[name.strip()] = value.strip()
        else:
            result[define.strip()] = 1
    return result
+
+
def get_source_stream(source):
    """Open a fast-import source for binary reading.

    '-' or None selects stdin (made binary-safe); a '.gz' suffix is
    transparently decompressed; anything else opens as a plain file.
    """
    if source == '-' or source is None:
        import sys
        return binary_stream(sys.stdin)
    if source.endswith('.gz'):
        import gzip
        return gzip.open(source, "rb")
    return open(source, "rb")
+
+
+
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/hgext3rd/fastimport/vendor/python_fastimport/parser.py	Tue Jan 19 22:56:34 2021 +0000
@@ -0,0 +1,659 @@
+# Copyright (C) 2008-2010 Canonical Ltd
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program.  If not, see <http://www.gnu.org/licenses/>.
+
+"""Parser of import data into command objects.
+
+In order to reuse existing front-ends, the stream format is a subset of
+the one used by git-fast-import (as of the 1.5.4 release of git at least).
+The grammar is:
+
+  stream ::= cmd*;
+
+  cmd ::= new_blob
+        | new_commit
+        | new_tag
+        | reset_branch
+        | checkpoint
+        | progress
+        ;
+
+  new_blob ::= 'blob' lf
+    mark?
+    file_content;
+  file_content ::= data;
+
+  new_commit ::= 'commit' sp ref_str lf
+    mark?
+    ('author' sp name '<' email '>' when lf)?
+    'committer' sp name '<' email '>' when lf
+    commit_msg
+    ('from' sp (ref_str | hexsha1 | sha1exp_str | idnum) lf)?
+    ('merge' sp (ref_str | hexsha1 | sha1exp_str | idnum) lf)*
+    file_change*
+    lf?;
+  commit_msg ::= data;
+
+  file_change ::= file_clr
+    | file_del
+    | file_rnm
+    | file_cpy
+    | file_obm
+    | file_inm;
+  file_clr ::= 'deleteall' lf;
+  file_del ::= 'D' sp path_str lf;
+  file_rnm ::= 'R' sp path_str sp path_str lf;
+  file_cpy ::= 'C' sp path_str sp path_str lf;
+  file_obm ::= 'M' sp mode sp (hexsha1 | idnum) sp path_str lf;
+  file_inm ::= 'M' sp mode sp 'inline' sp path_str lf
+    data;
+
+  new_tag ::= 'tag' sp tag_str lf
+    'from' sp (ref_str | hexsha1 | sha1exp_str | idnum) lf
+    'tagger' sp name '<' email '>' when lf
+    tag_msg;
+  tag_msg ::= data;
+
+  reset_branch ::= 'reset' sp ref_str lf
+    ('from' sp (ref_str | hexsha1 | sha1exp_str | idnum) lf)?
+    lf?;
+
+  checkpoint ::= 'checkpoint' lf
+    lf?;
+
+  progress ::= 'progress' sp not_lf* lf
+    lf?;
+
+     # note: the first idnum in a stream should be 1 and subsequent
+     # idnums should not have gaps between values as this will cause
+     # the stream parser to reserve space for the gapped values.  An
+     # idnum can be updated in the future to a new object by issuing
+     # a new mark directive with the old idnum.
+     #
+  mark ::= 'mark' sp idnum lf;
+  data ::= (delimited_data | exact_data)
+    lf?;
+
+    # note: delim may be any string but must not contain lf.
+    # data_line may contain any data but must not be exactly
+    # delim. The lf after the final data_line is included in
+    # the data.
+  delimited_data ::= 'data' sp '<<' delim lf
+    (data_line lf)*
+    delim lf;
+
+     # note: declen indicates the length of binary_data in bytes.
+     # declen does not include the lf preceding the binary data.
+     #
+  exact_data ::= 'data' sp declen lf
+    binary_data;
+
+     # note: quoted strings are C-style quoting supporting \c for
+     # common escapes of 'c' (e.g. \n, \t, \\, \") or \nnn where nnn
+     # is the signed byte value in octal.  Note that the only
+     # characters which must actually be escaped to protect the
+     # stream formatting are: \, " and LF.  Otherwise these values
+     # are UTF8.
+     #
+  ref_str     ::= ref;
+  sha1exp_str ::= sha1exp;
+  tag_str     ::= tag;
+  path_str    ::= path    | '"' quoted(path)    '"' ;
+  mode        ::= '100644' | '644'
+                | '100755' | '755'
+                | '120000'
+                ;
+
+  declen ::= # unsigned 32 bit value, ascii base10 notation;
+  bigint ::= # unsigned integer value, ascii base10 notation;
+  binary_data ::= # file content, not interpreted;
+
+  when         ::= raw_when | rfc2822_when;
+  raw_when     ::= ts sp tz;
+  rfc2822_when ::= # Valid RFC 2822 date and time;
+
+  sp ::= # ASCII space character;
+  lf ::= # ASCII newline (LF) character;
+
+     # note: a colon (':') must precede the numerical value assigned to
+     # an idnum.  This is to distinguish it from a ref or tag name as
+     # GIT does not permit ':' in ref or tag strings.
+     #
+  idnum   ::= ':' bigint;
+  path    ::= # GIT style file path, e.g. "a/b/c";
+  ref     ::= # GIT ref name, e.g. "refs/heads/MOZ_GECKO_EXPERIMENT";
+  tag     ::= # GIT tag name, e.g. "FIREFOX_1_5";
+  sha1exp ::= # Any valid GIT SHA1 expression;
+  hexsha1 ::= # SHA1 in hexadecimal format;
+
+     # note: name and email are UTF8 strings, however name must not
+     # contain '<' or lf and email must not contain any of the
+     # following: '<', '>', lf.
+     #
+  name  ::= # valid GIT author/committer name;
+  email ::= # valid GIT author/committer email;
+  ts    ::= # time since the epoch in seconds, ascii base10 notation;
+  tz    ::= # GIT style timezone;
+
+     # note: comments may appear anywhere in the input, except
+     # within a data command.  Any form of the data command
+     # always escapes the related input from comment processing.
+     #
+     # In case it is not clear, the '#' that starts the comment
+     # must be the first character on that line (an lf must have
+     # preceded it).
+     #
+  comment ::= '#' not_lf* lf;
+  not_lf  ::= # Any byte that is not ASCII newline (LF);
+"""
+from __future__ import print_function
+
+import collections
+import re
+import sys
+import codecs
+
+from fastimport import (
+    commands,
+    dates,
+    errors,
+    )
+from fastimport.helpers import (
+    newobject as object,
+    utf8_bytes_string,
+    )
+
+
+## Stream parsing ##
+
class LineBasedParser(object):
    """A parser over a binary stream that tracks line numbers.

    Lines may be pushed back onto the stream and re-read later.
    """

    def __init__(self, input_stream):
        """A Parser that keeps track of line numbers.

        :param input_stream: the file-like object to read from
        """
        self.input = input_stream
        self.lineno = 0
        # Stack of lines pushed back onto the input stream
        self._buffer = []

    def abort(self, exception, *args):
        """Raise an exception providing line number information."""
        raise exception(self.lineno, *args)

    def readline(self):
        """Get the next line including the newline or '' on EOF."""
        self.lineno += 1
        if not self._buffer:
            return self.input.readline()
        return self._buffer.pop()

    def next_line(self):
        """Get the next line without the newline or None on EOF."""
        line = self.readline()
        return line[:-1] if line else None

    def push_line(self, line):
        """Push line back onto the line buffer.

        :param line: the line with no trailing newline
        """
        self.lineno -= 1
        self._buffer.append(line + b'\n')

    def read_bytes(self, count):
        """Read a given number of bytes from the input stream.

        Throws MissingBytes if the bytes are not found.

        Note: This method does not read from the line buffer.

        :return: a byte string
        """
        data = self.input.read(count)
        # Keep the line counter in step with any newlines inside the data.
        self.lineno += data.count(b'\n')
        if len(data) != count:
            self.abort(errors.MissingBytes, count, len(data))
        return data

    def read_until(self, terminator):
        """Read the input stream until the terminator line is found.

        Note: This method does not read from the line buffer.

        :return: the bytes read up to but excluding the terminator.
        """
        chunks = []
        sentinel = terminator + b'\n'
        while True:
            current = self.input.readline()
            if current == sentinel:
                break
            chunks.append(current)
        return b''.join(chunks)
+
+
# Regular expression used for parsing. (Note: The spec states that the name
# part should be non-empty but git-fast-export doesn't always do that so
# the first bit is \w*, not \w+.) Also git-fast-import code says the
# space before the email is optional.
# Matches "name <email> when"; groups: (name)(email)(when).
_WHO_AND_WHEN_RE = re.compile(br'([^<]*)<(.*)> (.+)')
# Matches "name <email>" with no timestamp; groups: (name)(email).
_WHO_RE = re.compile(br'([^<]*)<(.*)>')
+
+
class ImportParser(LineBasedParser):
    """Parser turning a fast-import command stream into command objects."""

    def __init__(self, input_stream, verbose=False, output=sys.stdout,
        user_mapper=None, strict=True):
        """A Parser of import commands.

        :param input_stream: the file-like object to read from
        :param verbose: display extra information or not
        :param output: the file-like object to write messages to (YAGNI?)
        :param user_mapper: if not None, the UserMapper used to adjust
          user-ids for authors, committers and taggers.
        :param strict: Raise errors on strictly invalid data
        """
        LineBasedParser.__init__(self, input_stream)
        self.verbose = verbose
        self.output = output
        self.user_mapper = user_mapper
        self.strict = strict
        # We auto-detect the date format when a date is first encountered
        self.date_parser = None
        # Feature names seen so far, mapped to their (optional) values
        self.features = {}

    def warning(self, msg):
        """Write a warning to stderr, tagged with the current line number."""
        sys.stderr.write("warning line %d: %s\n" % (self.lineno, msg))

    def iter_commands(self):
        """Iterator returning ImportCommand objects."""
        while True:
            line = self.next_line()
            if line is None:
                # With the 'done' feature active, hitting EOF without an
                # explicit 'done' command is an error.
                if b'done' in self.features:
                    raise errors.PrematureEndOfStream(self.lineno)
                break
            elif len(line) == 0 or line.startswith(b'#'):
                # Blank lines and comments are skipped.
                continue
            # Search for commands in order of likelihood
            elif line.startswith(b'commit '):
                yield self._parse_commit(line[len(b'commit '):])
            elif line.startswith(b'blob'):
                yield self._parse_blob()
            elif line.startswith(b'done'):
                break
            elif line.startswith(b'progress '):
                yield commands.ProgressCommand(line[len(b'progress '):])
            elif line.startswith(b'reset '):
                yield self._parse_reset(line[len(b'reset '):])
            elif line.startswith(b'tag '):
                yield self._parse_tag(line[len(b'tag '):])
            elif line.startswith(b'checkpoint'):
                yield commands.CheckpointCommand()
            elif line.startswith(b'feature'):
                # NOTE(review): matched without the trailing space, so a
                # bare b'feature' line produces an empty feature name —
                # confirm this is intended.
                yield self._parse_feature(line[len(b'feature '):])
            else:
                self.abort(errors.InvalidCommand, line)

    def iter_file_commands(self):
        """Iterator returning FileCommand objects.

        If an invalid file command is found, the line is silently
        pushed back and iteration ends.
        """
        while True:
            line = self.next_line()
            if line is None:
                break
            elif len(line) == 0 or line.startswith(b'#'):
                continue
            # Search for file commands in order of likelihood
            elif line.startswith(b'M '):
                yield self._parse_file_modify(line[2:])
            elif line.startswith(b'D '):
                path = self._path(line[2:])
                yield commands.FileDeleteCommand(path)
            elif line.startswith(b'R '):
                old, new = self._path_pair(line[2:])
                yield commands.FileRenameCommand(old, new)
            elif line.startswith(b'C '):
                src, dest = self._path_pair(line[2:])
                yield commands.FileCopyCommand(src, dest)
            elif line.startswith(b'deleteall'):
                yield commands.FileDeleteAllCommand()
            else:
                # Not a file command: hand the line back to the caller.
                self.push_line(line)
                break

    def _parse_blob(self):
        """Parse a blob command."""
        lineno = self.lineno
        mark = self._get_mark_if_any()
        data = self._get_data(b'blob')
        return commands.BlobCommand(mark, data, lineno)

    def _parse_commit(self, ref):
        """Parse a commit command.

        :param ref: the ref name following 'commit ' (bytes)
        """
        lineno  = self.lineno
        mark = self._get_mark_if_any()
        author = self._get_user_info(b'commit', b'author', False)
        more_authors = []
        # Collect any additional (non-standard) author lines.
        while True:
            another_author = self._get_user_info(b'commit', b'author', False)
            if another_author is not None:
                more_authors.append(another_author)
            else:
                break
        committer = self._get_user_info(b'commit', b'committer')
        message = self._get_data(b'commit', b'message')
        from_ = self._get_from()
        merges = []
        while True:
            merge = self._get_merge()
            if merge is not None:
                # while the spec suggests it's illegal, git-fast-export
                # outputs multiple merges on the one line, e.g.
                # merge :x :y :z
                these_merges = merge.split(b' ')
                merges.extend(these_merges)
            else:
                break
        properties = {}
        while True:
            name_value = self._get_property()
            if name_value is not None:
                name, value = name_value
                properties[name] = value
            else:
                break
        return commands.CommitCommand(ref, mark, author, committer, message,
            from_, merges, list(self.iter_file_commands()), lineno=lineno,
            more_authors=more_authors, properties=properties)

    def _parse_feature(self, info):
        """Parse a feature command.

        :param info: 'name' or 'name=value' (bytes)
        """
        parts = info.split(b'=', 1)
        name = parts[0]
        if len(parts) > 1:
            value = self._path(parts[1])
        else:
            value = None
        # Record the feature so iter_commands can honour it (e.g. 'done').
        self.features[name] = value
        return commands.FeatureCommand(name, value, lineno=self.lineno)

    def _parse_file_modify(self, info):
        """Parse a filemodify command within a commit.

        :param info: a string in the format "mode dataref path"
          (where dataref might be the hard-coded literal 'inline').
        """
        params = info.split(b' ', 2)
        path = self._path(params[2])
        mode = self._mode(params[0])
        if params[1] == b'inline':
            dataref = None
            data = self._get_data(b'filemodify')
        else:
            dataref = params[1]
            data = None
        return commands.FileModifyCommand(path, mode, dataref,
            data)

    def _parse_reset(self, ref):
        """Parse a reset command."""
        from_ = self._get_from()
        return commands.ResetCommand(ref, from_)

    def _parse_tag(self, name):
        """Parse a tag command."""
        from_ = self._get_from(b'tag')
        tagger = self._get_user_info(b'tag', b'tagger',
                accept_just_who=True)
        message = self._get_data(b'tag', b'message')
        return commands.TagCommand(name, from_, tagger, message)

    def _get_mark_if_any(self):
        """Parse a mark section, or push the line back if absent."""
        # NOTE(review): next_line() can return None at EOF, which would
        # raise AttributeError here — TODO confirm a mark line is never
        # the last line of a stream.
        line = self.next_line()
        if line.startswith(b'mark :'):
            return line[len(b'mark :'):]
        else:
            self.push_line(line)
            return None

    def _get_from(self, required_for=None):
        """Parse a from section.

        :param required_for: if set, abort with MissingSection when no
          'from' line is present instead of pushing the line back
        """
        line = self.next_line()
        if line is None:
            return None
        elif line.startswith(b'from '):
            return line[len(b'from '):]
        elif required_for:
            self.abort(errors.MissingSection, required_for, 'from')
        else:
            self.push_line(line)
            return None

    def _get_merge(self):
        """Parse a merge section, or push the line back if absent."""
        line = self.next_line()
        if line is None:
            return None
        elif line.startswith(b'merge '):
            return line[len(b'merge '):]
        else:
            self.push_line(line)
            return None

    def _get_property(self):
        """Parse a property section, or push the line back if absent."""
        line = self.next_line()
        if line is None:
            return None
        elif line.startswith(b'property '):
            return self._name_value(line[len(b'property '):])
        else:
            self.push_line(line)
            return None

    def _get_user_info(self, cmd, section, required=True,
        accept_just_who=False):
        """Parse a user section.

        :param cmd: command name, for error reporting (bytes)
        :param section: section keyword, e.g. b'author' (bytes)
        :param required: abort with MissingSection when the section is
          absent; otherwise push the line back and return None
        :param accept_just_who: tolerate a missing timestamp
        """
        # NOTE(review): next_line() can return None at EOF, which would
        # raise AttributeError on startswith — TODO confirm callers
        # guarantee a following line.
        line = self.next_line()
        if line.startswith(section + b' '):
            return self._who_when(line[len(section + b' '):], cmd, section,
                accept_just_who=accept_just_who)
        elif required:
            self.abort(errors.MissingSection, cmd, section)
        else:
            self.push_line(line)
            return None

    def _get_data(self, required_for, section=b'data'):
        """Parse a data section (delimited or exact-length).

        :param required_for: command name, for error reporting
        :return: the raw data bytes
        """
        line = self.next_line()
        if line.startswith(b'data '):
            rest = line[len(b'data '):]
            if rest.startswith(b'<<'):
                # Delimited form: read until the delimiter line.
                return self.read_until(rest[2:])
            else:
                size = int(rest)
                read_bytes = self.read_bytes(size)
                # optional LF after data.
                next_line = self.input.readline()
                self.lineno += 1
                # Anything other than a lone LF is handed back (without
                # its newline) for the next parse step.
                # NOTE(review): at EOF next_line is b'' and is still
                # pushed back, producing one phantom empty line —
                # confirm harmless.
                if len(next_line) > 1 or next_line != b'\n':
                    self.push_line(next_line[:-1])
                return read_bytes
        else:
            self.abort(errors.MissingSection, required_for, section)

    def _who_when(self, s, cmd, section, accept_just_who=False):
        """Parse who and when information from a string.

        :return: a tuple of (name,email,timestamp,timezone). name may be
            the empty string if only an email address was given.
        """
        match = _WHO_AND_WHEN_RE.search(s)
        if match:
            datestr = match.group(3).lstrip()
            if self.date_parser is None:
                # auto-detect the date format
                if len(datestr.split(b' ')) == 2:
                    # two tokens => "<seconds> <timezone>" raw format
                    date_format = 'raw'
                elif datestr == b'now':
                    date_format = 'now'
                else:
                    date_format = 'rfc2822'
                self.date_parser = dates.DATE_PARSERS_BY_NAME[date_format]
            try:
                when = self.date_parser(datestr, self.lineno)
            except ValueError:
                print("failed to parse datestr '%s'" % (datestr,))
                raise
            name = match.group(1).rstrip()
            email = match.group(2)
        else:
            match = _WHO_RE.search(s)
            if accept_just_who and match:
                # HACK around missing time
                # TODO: output a warning here
                when = dates.DATE_PARSERS_BY_NAME['now']('now')
                name = match.group(1)
                email = match.group(2)
            elif self.strict:
                self.abort(errors.BadFormat, cmd, section, s)
            else:
                name = s
                email = None
                when = dates.DATE_PARSERS_BY_NAME['now']('now')
        # Strip at most one trailing space (the separator before '<').
        if len(name) > 0:
            if name.endswith(b' '):
                name = name[:-1]
        # While it shouldn't happen, some datasets have email addresses
        # which contain unicode characters. See bug 338186. We sanitize
        # the data at this level just in case.
        if self.user_mapper:
            name, email = self.user_mapper.map_name_and_email(name, email)

        return Authorship(name, email, when[0], when[1])

    def _name_value(self, s):
        """Parse a (name,value) tuple from 'name value-length value'."""
        parts = s.split(b' ', 2)
        name = parts[0]
        if len(parts) == 1:
            value = None
        else:
            size = int(parts[1])
            value = parts[2]
            still_to_read = size - len(value)
            # The declared length may exceed what was on this line: the
            # value continues on following lines, so re-attach the LF
            # that next_line() stripped and read the remaining bytes
            # (dropping the final byte, which the re-attached LF
            # accounts for).
            if still_to_read > 0:
                read_bytes = self.read_bytes(still_to_read)
                value += b'\n' + read_bytes[:still_to_read - 1]
        return (name, value)

    def _path(self, s):
        """Parse a path, unquoting it if it is C-style quoted."""
        if s.startswith(b'"'):
            if not s.endswith(b'"'):
                self.abort(errors.BadFormat, '?', '?', s)
            else:
                return _unquote_c_string(s[1:-1])
        return s

    def _path_pair(self, s):
        """Parse two paths separated by a space."""
        # TODO: handle a space in the first path
        if s.startswith(b'"'):
            parts = s[1:].split(b'" ', 1)
        else:
            parts = s.split(b' ', 1)
        if len(parts) != 2:
            self.abort(errors.BadFormat, '?', '?', s)
        elif parts[1].startswith(b'"') and parts[1].endswith(b'"'):
            parts[1] = parts[1][1:-1]
        elif parts[1].startswith(b'"') or parts[1].endswith(b'"'):
            # An unbalanced quote on the second path is malformed.
            self.abort(errors.BadFormat, '?', '?', s)
        return [_unquote_c_string(s) for s in parts]

    def _mode(self, s):
        """Check file mode format and parse into an int.

        :return: mode as integer
        """
        # Note: Output from git-fast-export slightly different to spec
        if s in [b'644', b'100644', b'0100644']:
            return 0o100644
        elif s in [b'755', b'100755', b'0100755']:
            return 0o100755
        elif s in [b'040000', b'0040000']:
            return 0o40000
        elif s in [b'120000', b'0120000']:
            return 0o120000
        elif s in [b'160000', b'0160000']:
            return 0o160000
        else:
            self.abort(errors.BadFormat, 'filemodify', 'mode', s)
+
+
# Matches a single C/Python-style escape sequence in a bytes string so
# each one can be decoded in isolation.
ESCAPE_SEQUENCE_BYTES_RE = re.compile(br'''
    ( \\U........      # 8-digit hex escapes
    | \\u....          # 4-digit hex escapes
    | \\x..            # 2-digit hex escapes
    | \\[0-7]{1,3}     # Octal escapes
    | \\N\{[^}]+\}     # Unicode characters by name
    | \\[\\'"abfnrtv]  # Single-character escapes
    )''', re.VERBOSE
)

# Same alternatives as above, for text (unicode) strings.
ESCAPE_SEQUENCE_RE = re.compile(r'''
    ( \\U........
    | \\u....
    | \\x..
    | \\[0-7]{1,3}
    | \\N\{[^}]+\}
    | \\[\\'"abfnrtv]
    )''', re.UNICODE | re.VERBOSE
)
+
def _unquote_c_string(s):
    r"""Replace C-style escape sequences (\n, \", etc.) with real chars."""
    # Doing s.encode('utf-8').decode('unicode_escape') can return an
    # incorrect output with unicode strings (both in py2 and py3); the
    # safest way is to match each escape sequence and decode it alone.
    def decode_match(match):
        return utf8_bytes_string(
            codecs.decode(match.group(0), 'unicode-escape')
        )

    if sys.version_info[0] >= 3 and isinstance(s, bytes):
        return ESCAPE_SEQUENCE_BYTES_RE.sub(decode_match, s)
    return ESCAPE_SEQUENCE_RE.sub(decode_match, s)
+
+
+Authorship = collections.namedtuple('Authorship', 'name email timestamp timezone')
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/hgext3rd/fastimport/vendor/python_fastimport/processor.py	Tue Jan 19 22:56:34 2021 +0000
@@ -0,0 +1,203 @@
+# Copyright (C) 2008 Canonical Ltd
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program.  If not, see <http://www.gnu.org/licenses/>.
+
+"""Processor for fast-import commands.
+
+This module provides the skeleton of a fast-import backend.
+To import from a fast-import stream to your version-control system:
+
+ - derive a class from the abstract ImportProcessor class and
+   implement the *_helper methods.
+
+ - parse a fast-import stream into a sequence of commands, for example
+   using the helpers from the parser module.
+
+ - pass that command sequence to the process method of your processor.
+
+See git-fast-import.1 for the meaning of each command and the
+processors package for examples.
+"""
+import sys
+import time
+
+from fastimport import errors
+from fastimport.helpers import newobject as object
+
+
class ImportProcessor(object):
    """Base class for fast-import stream processors.

    Subclasses should override the pre_*, post_* and *_handler
    methods as appropriate.
    """

    # Parameter names this processor understands; anything else passed
    # in params causes UnknownParameter to be raised.
    known_params = []

    def __init__(self, params=None, verbose=False, outf=None):
        """Create a processor.

        :param params: dict of parameter name -> value, validated
            against known_params; None means no parameters
        :param verbose: if True, subclasses may emit extra information
        :param outf: file-like object for output; defaults to sys.stdout
        """
        if outf is None:
            self.outf = sys.stdout
        else:
            self.outf = outf
        self.verbose = verbose
        if params is None:
            self.params = {}
        else:
            self.params = params
            self.validate_parameters()

        # Handlers can set this to request exiting cleanly without
        # iterating through the remaining commands
        self.finished = False

    def validate_parameters(self):
        """Validate that the parameters are correctly specified."""
        for p in self.params:
            if p not in self.known_params:
                raise errors.UnknownParameter(p, self.known_params)

    def process(self, command_iter):
        """Import data by processing a stream of commands.

        :param command_iter: a callable returning an iterable of commands
        """
        self._process(command_iter)

    def _process(self, command_iter):
        """Dispatch each command to its *_handler method."""
        self.pre_process()
        for cmd in command_iter():
            try:
                name = (cmd.name + b'_handler').decode('utf8')
                handler = getattr(self.__class__, name)
            except AttributeError:
                # BUG FIX: getattr raises AttributeError, not KeyError,
                # when the handler method is missing; catching KeyError
                # meant MissingHandler was never actually raised.
                raise errors.MissingHandler(cmd.name)
            else:
                self.pre_handler(cmd)
                handler(self, cmd)
                self.post_handler(cmd)
            if self.finished:
                break
        self.post_process()

    def warning(self, msg, *args):
        """Output a warning but timestamp it."""
        pass

    def debug(self, mgs, *args):
        """Output a debug message."""
        # (parameter name 'mgs' kept as-is for compatibility with any
        # keyword callers)
        pass

    def _time_of_day(self):
        """Time of day as a string."""
        # Note: this is a separate method so tests can patch in a fixed value
        return time.strftime("%H:%M:%S")

    def pre_process(self):
        """Hook for logic at start of processing."""
        pass

    def post_process(self):
        """Hook for logic at end of processing."""
        pass

    def pre_handler(self, cmd):
        """Hook for logic before each handler starts."""
        pass

    def post_handler(self, cmd):
        """Hook for logic after each handler finishes."""
        pass

    def progress_handler(self, cmd):
        """Process a ProgressCommand."""
        raise NotImplementedError(self.progress_handler)

    def blob_handler(self, cmd):
        """Process a BlobCommand."""
        raise NotImplementedError(self.blob_handler)

    def checkpoint_handler(self, cmd):
        """Process a CheckpointCommand."""
        raise NotImplementedError(self.checkpoint_handler)

    def commit_handler(self, cmd):
        """Process a CommitCommand."""
        raise NotImplementedError(self.commit_handler)

    def reset_handler(self, cmd):
        """Process a ResetCommand."""
        raise NotImplementedError(self.reset_handler)

    def tag_handler(self, cmd):
        """Process a TagCommand."""
        raise NotImplementedError(self.tag_handler)

    def feature_handler(self, cmd):
        """Process a FeatureCommand."""
        raise NotImplementedError(self.feature_handler)
+
+
class CommitHandler(object):
    """Base class for commit handling.

    Subclasses should override the pre_*, post_* and *_handler
    methods as appropriate.
    """

    def __init__(self, command):
        """:param command: the CommitCommand whose files are processed"""
        self.command = command

    def process(self):
        """Dispatch each file command to its *_handler method."""
        self.pre_process_files()
        for fc in self.command.iter_files():
            try:
                # File command names look like b'filemodify'; strip the
                # b'file' prefix to find the handler (e.g. modify_handler).
                name = (fc.name[4:] + b'_handler').decode('utf8')
                handler = getattr(self.__class__, name)
            except AttributeError:
                # BUG FIX: getattr raises AttributeError, not KeyError,
                # when the handler method is missing; catching KeyError
                # meant MissingHandler was never actually raised.
                raise errors.MissingHandler(fc.name)
            else:
                handler(self, fc)
        self.post_process_files()

    def warning(self, msg, *args):
        """Output a warning but add context."""
        pass

    def pre_process_files(self):
        """Prepare for committing."""
        pass

    def post_process_files(self):
        """Save the revision."""
        pass

    def modify_handler(self, filecmd):
        """Handle a filemodify command."""
        raise NotImplementedError(self.modify_handler)

    def delete_handler(self, filecmd):
        """Handle a filedelete command."""
        raise NotImplementedError(self.delete_handler)

    def copy_handler(self, filecmd):
        """Handle a filecopy command."""
        raise NotImplementedError(self.copy_handler)

    def rename_handler(self, filecmd):
        """Handle a filerename command."""
        raise NotImplementedError(self.rename_handler)

    def deleteall_handler(self, filecmd):
        """Handle a filedeleteall command."""
        raise NotImplementedError(self.deleteall_handler)
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/hgext3rd/fastimport/vendor/python_fastimport/processors/filter_processor.py	Tue Jan 19 22:56:34 2021 +0000
@@ -0,0 +1,300 @@
+# Copyright (C) 2009 Canonical Ltd
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program.  If not, see <http://www.gnu.org/licenses/>.
+
+"""Import processor that filters the input (and doesn't import)."""
+from fastimport import (
+    commands,
+    helpers,
+    processor,
+    )
+import stat
+
+
class FilterProcessor(processor.ImportProcessor):
    """An import processor that filters the input to include/exclude objects.

    No changes to the current repository are made.

    Here are the supported parameters:

    * include_paths - a list of paths that commits must change in order to
      be kept in the output stream

    * exclude_paths - a list of paths that should not appear in the output
      stream

    * squash_empty_commits - if set to True (the default), squash commits
      that don't have any changes after the filter has been applied
    """

    # Parameter names (bytes) accepted by this processor.
    known_params = [
        b'include_paths',
        b'exclude_paths',
        b'squash_empty_commits'
    ]

    def pre_process(self):
        # Initialise filtering state before the stream is processed.
        self.includes = self.params.get(b'include_paths')
        self.excludes = self.params.get(b'exclude_paths')
        self.squash_empty_commits = bool(
            self.params.get(b'squash_empty_commits', True))
        # What's the new root, if any
        self.new_root = helpers.common_directory(self.includes)
        # Buffer of blobs until we know we need them: mark -> cmd
        self.blobs = {}
        # These are the commits we've squashed so far
        self.squashed_commits = set()
        # Map of commit-id to list of parents
        self.parents = {}

    def pre_handler(self, cmd):
        # Reset per-command state before each command handler runs.
        self.command = cmd
        # Should this command be included in the output or not?
        self.keep = False
        # Blobs to dump into the output before dumping the command itself
        self.referenced_blobs = []

    def post_handler(self, cmd):
        # Emit the command (preceded by any buffered blobs it references)
        # if the handler decided to keep it.
        if not self.keep:
            return
        # print referenced blobs and the command
        for blob_id in self.referenced_blobs:
            self._print_command(self.blobs[blob_id])
        self._print_command(self.command)

    def progress_handler(self, cmd):
        """Process a ProgressCommand."""
        # These always pass through
        self.keep = True

    def blob_handler(self, cmd):
        """Process a BlobCommand."""
        # These never pass through directly. We buffer them and only
        # output them if referenced by an interesting command.
        self.blobs[cmd.id] = cmd
        self.keep = False

    def checkpoint_handler(self, cmd):
        """Process a CheckpointCommand."""
        # These always pass through
        self.keep = True

    def commit_handler(self, cmd):
        """Process a CommitCommand."""
        # These pass through if they meet the filtering conditions
        interesting_filecmds = self._filter_filecommands(cmd.iter_files)
        if interesting_filecmds or not self.squash_empty_commits:
            # If all we have is a single deleteall, skip this commit
            if len(interesting_filecmds) == 1 and isinstance(
                interesting_filecmds[0], commands.FileDeleteAllCommand):
                pass
            else:
                # Remember just the interesting file commands
                self.keep = True
                cmd.file_iter = iter(interesting_filecmds)

                # Record the referenced blobs
                for fc in interesting_filecmds:
                    if isinstance(fc, commands.FileModifyCommand):
                        if (fc.dataref is not None and
                            not stat.S_ISDIR(fc.mode)):
                            self.referenced_blobs.append(fc.dataref)

                # Update from and merges to refer to commits in the output
                cmd.from_ = self._find_interesting_from(cmd.from_)
                cmd.merges = self._find_interesting_merges(cmd.merges)
        else:
            self.squashed_commits.add(cmd.id)

        # Keep track of the parents
        if cmd.from_ and cmd.merges:
            parents = [cmd.from_] + cmd.merges
        elif cmd.from_:
            parents = [cmd.from_]
        else:
            parents = None
        if cmd.mark is not None:
            self.parents[b':' + cmd.mark] = parents

    def reset_handler(self, cmd):
        """Process a ResetCommand."""
        if cmd.from_ is None:
            # We pass through resets that init a branch because we have to
            # assume the branch might be interesting.
            self.keep = True
        else:
            # Keep resets if they indirectly reference something we kept
            cmd.from_ = self._find_interesting_from(cmd.from_)
            self.keep = cmd.from_ is not None

    def tag_handler(self, cmd):
        """Process a TagCommand."""
        # Keep tags if they indirectly reference something we kept
        cmd.from_ = self._find_interesting_from(cmd.from_)
        self.keep = cmd.from_ is not None

    def feature_handler(self, cmd):
        """Process a FeatureCommand."""
        feature = cmd.feature_name
        if feature not in commands.FEATURE_NAMES:
            self.warning("feature %s is not supported - parsing may fail"
                % (feature,))
        # These always pass through
        self.keep = True

    def _print_command(self, cmd):
        """Wrapper to avoid adding unnecessary blank lines."""
        text = helpers.repr_bytes(cmd)
        self.outf.write(text)
        if not text.endswith(b'\n'):
            self.outf.write(b'\n')

    def _filter_filecommands(self, filecmd_iter):
        """Return the filecommands filtered by includes & excludes.

        :param filecmd_iter: a callable returning an iterator of
          FileCommand objects (e.g. a CommitCommand's iter_files)
        :return: a list of FileCommand objects
        """
        if self.includes is None and self.excludes is None:
            return list(filecmd_iter())

        # Do the filtering, adjusting for the new_root
        result = []
        for fc in filecmd_iter():
            if (isinstance(fc, commands.FileModifyCommand) or
                isinstance(fc, commands.FileDeleteCommand)):
                if self._path_to_be_kept(fc.path):
                    fc.path = self._adjust_for_new_root(fc.path)
                else:
                    continue
            elif isinstance(fc, commands.FileDeleteAllCommand):
                pass
            elif isinstance(fc, commands.FileRenameCommand):
                fc = self._convert_rename(fc)
            elif isinstance(fc, commands.FileCopyCommand):
                fc = self._convert_copy(fc)
            else:
                self.warning("cannot handle FileCommands of class %s - ignoring",
                        fc.__class__)
                continue
            # _convert_rename/_convert_copy may return None for commands
            # that should be dropped entirely.
            if fc is not None:
                result.append(fc)
        return result

    def _path_to_be_kept(self, path):
        """Does the given path pass the filtering criteria?"""
        if self.excludes and (path in self.excludes
                or helpers.is_inside_any(self.excludes, path)):
            return False
        if self.includes:
            return (path in self.includes
                or helpers.is_inside_any(self.includes, path))
        return True

    def _adjust_for_new_root(self, path):
        """Adjust a path given the new root directory of the output."""
        if self.new_root is None:
            return path
        elif path.startswith(self.new_root):
            return path[len(self.new_root):]
        else:
            return path

    def _find_interesting_parent(self, commit_ref):
        """Walk squashed first-parents until a kept commit is found.

        :return: the first ancestor (including commit_ref itself) that was
          not squashed, or None if the whole chain was squashed
        """
        while True:
            if commit_ref not in self.squashed_commits:
                return commit_ref
            parents = self.parents.get(commit_ref)
            if not parents:
                return None
            commit_ref = parents[0]

    def _find_interesting_from(self, commit_ref):
        """Like _find_interesting_parent but tolerates a None reference."""
        if commit_ref is None:
            return None
        return self._find_interesting_parent(commit_ref)

    def _find_interesting_merges(self, commit_refs):
        """Map merge references to kept commits, dropping squashed ones.

        :return: the surviving merge references, or None if none survive
        """
        if commit_refs is None:
            return None
        merges = []
        for commit_ref in commit_refs:
            parent = self._find_interesting_parent(commit_ref)
            if parent is not None:
                merges.append(parent)
        if merges:
            return merges
        else:
            return None

    def _convert_rename(self, fc):
        """Convert a FileRenameCommand into a new FileCommand.

        :return: None if the rename is being ignored, otherwise a
          new FileCommand based on the whether the old and new paths
          are inside or outside of the interesting locations.
          """
        old = fc.old_path
        new = fc.new_path
        keep_old = self._path_to_be_kept(old)
        keep_new = self._path_to_be_kept(new)
        if keep_old and keep_new:
            fc.old_path = self._adjust_for_new_root(old)
            fc.new_path = self._adjust_for_new_root(new)
            return fc
        elif keep_old:
            # The file has been renamed to a non-interesting location.
            # Delete it!
            old = self._adjust_for_new_root(old)
            return commands.FileDeleteCommand(old)
        elif keep_new:
            # The file has been renamed into an interesting location
            # We really ought to add it but we don't currently buffer
            # the contents of all previous files and probably never want
            # to. Maybe fast-import-info needs to be extended to
            # remember all renames and a config file can be passed
            # into here ala fast-import?
            self.warning("cannot turn rename of %s into an add of %s yet" %
                (old, new))
        return None

    def _convert_copy(self, fc):
        """Convert a FileCopyCommand into a new FileCommand.

        :return: None if the copy is being ignored, otherwise a
          new FileCommand based on the whether the source and destination
          paths are inside or outside of the interesting locations.
          """
        src = fc.src_path
        dest = fc.dest_path
        keep_src = self._path_to_be_kept(src)
        keep_dest = self._path_to_be_kept(dest)
        if keep_src and keep_dest:
            fc.src_path = self._adjust_for_new_root(src)
            fc.dest_path = self._adjust_for_new_root(dest)
            return fc
        elif keep_src:
            # The file has been copied to a non-interesting location.
            # Ignore it!
            return None
        elif keep_dest:
            # The file has been copied into an interesting location
            # We really ought to add it but we don't currently buffer
            # the contents of all previous files and probably never want
            # to. Maybe fast-import-info needs to be extended to
            # remember all copies and a config file can be passed
            # into here ala fast-import?
            self.warning("cannot turn copy of %s into an add of %s yet" %
                (src, dest))
        return None
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/hgext3rd/fastimport/vendor/python_fastimport/processors/info_processor.py	Tue Jan 19 22:56:34 2021 +0000
@@ -0,0 +1,286 @@
+# Copyright (C) 2008 Canonical Ltd
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program.  If not, see <http://www.gnu.org/licenses/>.
+
+"""Import processor that dumps stats about the input (and doesn't import)."""
+
+from __future__ import absolute_import
+
+from .. import (
+    reftracker,
+    )
+from ..helpers import (
+    invert_dict,
+    invert_dictset,
+    )
+from fastimport import (
+    commands,
+    processor,
+    )
+import stat
+
+
class InfoProcessor(processor.ImportProcessor):
    """An import processor that dumps statistics about the input.

    No changes to the current repository are made.

    As well as providing useful information about an import
    stream before importing it, this processor is useful for
    benchmarking the speed at which data can be extracted from
    the source.
    """

    def __init__(self, params=None, verbose=0, outf=None):
        processor.ImportProcessor.__init__(self, params, verbose,
            outf=outf)

    def pre_process(self):
        """Initialise all statistics before the stream is processed."""
        # Command counters, keyed by (bytes) command name
        self.cmd_counts = {}
        for cmd in commands.COMMAND_NAMES:
            self.cmd_counts[cmd] = 0
        self.file_cmd_counts = {}
        for fc in commands.FILE_COMMAND_NAMES:
            self.file_cmd_counts[fc] = 0
        # parent count -> number of commits with that many parents
        self.parent_counts = {}
        self.max_parent_count = 0
        self.committers = set()
        self.separate_authors_found = False
        self.symlinks_found = False
        self.executables_found = False
        self.sha_blob_references = False
        self.lightweight_tags = 0
        # Blob usage tracking: usage category -> set of blob ids
        self.blobs = {}
        for usage in ['new', 'used', 'unknown', 'unmarked']:
            self.blobs[usage] = set()
        # mark -> reference count (only for blobs referenced more than once)
        self.blob_ref_counts = {}
        # Head tracking
        self.reftracker = reftracker.RefTracker()
        # Stuff to cache: a map from mark to # of times that mark is merged
        self.merges = {}
        # Stuff to cache: these are maps from mark to sets
        self.rename_old_paths = {}
        self.copy_source_paths = {}

    def post_process(self):
        """Dump the accumulated statistics to the output stream."""
        cmd_names = commands.COMMAND_NAMES
        fc_names = commands.FILE_COMMAND_NAMES
        self._dump_stats_group("Command counts",
            [(c.decode('utf-8'), self.cmd_counts[c]) for c in cmd_names], str)
        self._dump_stats_group("File command counts",
            [(c.decode('utf-8'), self.file_cmd_counts[c]) for c in fc_names], str)

        # Commit stats
        if self.cmd_counts[b'commit']:
            p_items = []
            for i in range(self.max_parent_count + 1):
                if i in self.parent_counts:
                    count = self.parent_counts[i]
                    p_items.append(("parents-%d" % i, count))
            merges_count = len(self.merges)
            p_items.append(('total revisions merged', merges_count))
            flags = {
                'separate authors found': self.separate_authors_found,
                'executables': self.executables_found,
                'symlinks': self.symlinks_found,
                'blobs referenced by SHA': self.sha_blob_references,
                }
            self._dump_stats_group("Parent counts", p_items, str)
            self._dump_stats_group("Commit analysis", sorted(flags.items()), _found)
            heads = invert_dictset(self.reftracker.heads)
            self._dump_stats_group(
                    "Head analysis",
                    [(k.decode('utf-8'),
                        ', '.join([m.decode('utf-8') for m in v]))
                        for (k, v) in heads.items()], None,
                    _iterable_as_config_list)
            # note("\t%d\t%s" % (len(self.committers), 'unique committers'))
            self._dump_stats_group("Merges", self.merges.items(), None)
            # We only show the rename old path and copy source paths when -vv
            # (verbose=2) is specified. The output here for mysql's data can't
            # be parsed currently so this bit of code needs more work anyhow ..
            if self.verbose >= 2:
                self._dump_stats_group("Rename old paths",
                    self.rename_old_paths.items(), len,
                    _iterable_as_config_list)
                self._dump_stats_group("Copy source paths",
                    self.copy_source_paths.items(), len,
                    _iterable_as_config_list)

        # Blob stats
        if self.cmd_counts[b'blob']:
            # In verbose mode, don't list every blob used
            if self.verbose:
                del self.blobs['used']
            self._dump_stats_group("Blob usage tracking",
                self.blobs.items(), len, _iterable_as_config_list)
        if self.blob_ref_counts:
            blobs_by_count = invert_dict(self.blob_ref_counts)
            blob_items = sorted(blobs_by_count.items())
            self._dump_stats_group("Blob reference counts",
                blob_items, len, _iterable_as_config_list)

        # Other stats
        if self.cmd_counts[b'reset']:
            reset_stats = {
                'lightweight tags': self.lightweight_tags,
                }
            self._dump_stats_group("Reset analysis", reset_stats.items())

    def _dump_stats_group(self, title, items, normal_formatter=None,
        verbose_formatter=None):
        """Dump a statistics group.

        In verbose mode, do so as a config file so
        that other processors can load the information if they want to.
        :param normal_formatter: the callable to apply to the value
          before displaying it in normal mode
        :param verbose_formatter: the callable to apply to the value
          before displaying it in verbose mode
        """
        if self.verbose:
            self.outf.write("[%s]\n" % (title,))
            for name, value in items:
                if verbose_formatter is not None:
                    value = verbose_formatter(value)
                if isinstance(name, str):
                    name = name.replace(' ', '-')
                self.outf.write("%s = %s\n" % (name, value))
            self.outf.write("\n")
        else:
            self.outf.write("%s:\n" % (title,))
            for name, value in items:
                if normal_formatter is not None:
                    value = normal_formatter(value)
                self.outf.write("\t%s\t%s\n" % (value, name))

    def progress_handler(self, cmd):
        """Process a ProgressCommand."""
        self.cmd_counts[cmd.name] += 1

    def blob_handler(self, cmd):
        """Process a BlobCommand."""
        self.cmd_counts[cmd.name] += 1
        if cmd.mark is None:
            self.blobs['unmarked'].add(cmd.id)
        else:
            self.blobs['new'].add(cmd.id)
            # Marks can be re-used so remove it from used if already there.
            try:
                self.blobs['used'].remove(cmd.id)
            except KeyError:
                pass

    def checkpoint_handler(self, cmd):
        """Process a CheckpointCommand."""
        self.cmd_counts[cmd.name] += 1

    def commit_handler(self, cmd):
        """Process a CommitCommand."""
        self.cmd_counts[cmd.name] += 1
        self.committers.add(cmd.committer)
        if cmd.author is not None:
            self.separate_authors_found = True
        for fc in cmd.iter_files():
            self.file_cmd_counts[fc.name] += 1
            if isinstance(fc, commands.FileModifyCommand):
                if fc.mode & 0o111:
                    self.executables_found = True
                if stat.S_ISLNK(fc.mode):
                    self.symlinks_found = True
                if fc.dataref is not None:
                    # datarefs are bytes; a b':' prefix marks a mark
                    # reference, anything else is a raw SHA reference.
                    # (Comparing fc.dataref[0] to ':' would compare an
                    # int to a str and never match on Python 3.)
                    if fc.dataref[:1] == b':':
                        self._track_blob(fc.dataref)
                    else:
                        self.sha_blob_references = True
            elif isinstance(fc, commands.FileRenameCommand):
                self.rename_old_paths.setdefault(cmd.id, set()).add(fc.old_path)
            elif isinstance(fc, commands.FileCopyCommand):
                self.copy_source_paths.setdefault(cmd.id, set()).add(fc.src_path)

        # Track the heads
        parents = self.reftracker.track_heads(cmd)

        # Track the parent counts
        parent_count = len(parents)
        try:
            self.parent_counts[parent_count] += 1
        except KeyError:
            self.parent_counts[parent_count] = 1
            if parent_count > self.max_parent_count:
                self.max_parent_count = parent_count

        # Remember the merges
        if cmd.merges:
            for merge in cmd.merges:
                if merge in self.merges:
                    self.merges[merge] += 1
                else:
                    self.merges[merge] = 1

    def reset_handler(self, cmd):
        """Process a ResetCommand."""
        self.cmd_counts[cmd.name] += 1
        # refs are bytes, so the prefix must be a bytes literal
        # (a str argument to bytes.startswith raises TypeError).
        if cmd.ref.startswith(b'refs/tags/'):
            self.lightweight_tags += 1
        else:
            if cmd.from_ is not None:
                self.reftracker.track_heads_for_ref(
                    cmd.ref, cmd.from_)

    def tag_handler(self, cmd):
        """Process a TagCommand."""
        self.cmd_counts[cmd.name] += 1

    def feature_handler(self, cmd):
        """Process a FeatureCommand."""
        self.cmd_counts[cmd.name] += 1
        feature = cmd.feature_name
        if feature not in commands.FEATURE_NAMES:
            self.warning("feature %s is not supported - parsing may fail"
                % (feature,))

    def _track_blob(self, mark):
        """Record a reference to the blob with the given mark."""
        if mark in self.blob_ref_counts:
            self.blob_ref_counts[mark] += 1
        elif mark in self.blobs['used']:
            # Second reference: start counting explicitly
            self.blob_ref_counts[mark] = 2
            self.blobs['used'].remove(mark)
        elif mark in self.blobs['new']:
            # First reference to a blob we have seen defined
            self.blobs['used'].add(mark)
            self.blobs['new'].remove(mark)
        else:
            self.blobs['unknown'].add(mark)
+
+def _found(b):
+    """Format a found boolean as a string."""
+    return ['no', 'found'][b]
+
+def _iterable_as_config_list(s):
+    """Format an iterable as a sequence of comma-separated strings.
+    
+    To match what ConfigObj expects, a single item list has a trailing comma.
+    """
+    items = sorted(s)
+    if len(items) == 1:
+        return "%s," % (items[0],)
+    else:
+        return ", ".join(items)
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/hgext3rd/fastimport/vendor/python_fastimport/processors/query_processor.py	Tue Jan 19 22:56:34 2021 +0000
@@ -0,0 +1,98 @@
+# Copyright (C) 2008 Canonical Ltd
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program.  If not, see <http://www.gnu.org/licenses/>.
+
+"""Import processor that queries the input (and doesn't import)."""
+from __future__ import print_function
+
+
+from fastimport import (
+    commands,
+    processor,
+    )
+
+
class QueryProcessor(processor.ImportProcessor):
    """An import processor that queries the input.

    No changes to the current repository are made.
    """

    known_params = (
        commands.COMMAND_NAMES +
        commands.FILE_COMMAND_NAMES +
        [b'commit-mark']
    )

    def __init__(self, params=None, verbose=False):
        processor.ImportProcessor.__init__(self, params, verbose)
        # param name -> list of fields to display (None means all fields)
        self.parsed_params = {}
        self.interesting_commit = None
        self._finished = False
        if params:
            # Param names are bytes (see known_params), so the lookup key
            # must be bytes too; a str key would never match.
            if b'commit-mark' in params:
                self.interesting_commit = params[b'commit-mark']
                del params[b'commit-mark']
            for name, value in params.items():
                if value == 1:
                    # All fields
                    fields = None
                else:
                    # NOTE(review): assumes value is a str of
                    # comma-separated field names - confirm against callers.
                    fields = value.split(',')
                self.parsed_params[name] = fields

    def pre_handler(self, cmd):
        """Print any command selected by the parsed parameters."""
        if self._finished:
            return
        # Command names are bytes (e.g. b'commit').
        if self.interesting_commit and cmd.name == b'commit':
            if cmd.mark == self.interesting_commit:
                print(cmd.to_string())
                self._finished = True
            return
        if cmd.name in self.parsed_params:
            fields = self.parsed_params[cmd.name]
            # 'text' rather than 'str' to avoid shadowing the builtin.
            text = cmd.dump_str(fields, self.parsed_params, self.verbose)
            print("%s" % (text,))

    def progress_handler(self, cmd):
        """Process a ProgressCommand."""
        pass

    def blob_handler(self, cmd):
        """Process a BlobCommand."""
        pass

    def checkpoint_handler(self, cmd):
        """Process a CheckpointCommand."""
        pass

    def commit_handler(self, cmd):
        """Process a CommitCommand."""
        pass

    def reset_handler(self, cmd):
        """Process a ResetCommand."""
        pass

    def tag_handler(self, cmd):
        """Process a TagCommand."""
        pass

    def feature_handler(self, cmd):
        """Process a FeatureCommand."""
        feature = cmd.feature_name
        if feature not in commands.FEATURE_NAMES:
            self.warning("feature %s is not supported - parsing may fail"
                % (feature,))
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/hgext3rd/fastimport/vendor/python_fastimport/reftracker.py	Tue Jan 19 22:56:34 2021 +0000
@@ -0,0 +1,68 @@
+# Copyright (C) 2009 Canonical Ltd
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program.  If not, see <http://www.gnu.org/licenses/>.
+
+
+"""Tracker of refs."""
+
+from __future__ import absolute_import
+
+
class RefTracker(object):
    """Track the head commits of each ref as commits stream past."""

    def __init__(self):
        # Most recently updated ref.
        self.last_ref = None
        # Map of ref -> last commit id seen on that ref.
        self.last_ids = {}
        # Map of commit id -> set of refs for which it is currently a head.
        self.heads = {}

    def dump_stats(self, note):
        # NOTE(review): _show_stats_for is not defined on this class, so
        # calling this raises AttributeError - TODO confirm intent.
        self._show_stats_for(self.last_ids, "last-ids", note=note)
        self._show_stats_for(self.heads, "heads", note=note)

    def clear(self):
        """Forget all tracked ids and heads (last_ref is left alone)."""
        self.last_ids.clear()
        self.heads.clear()

    def track_heads(self, cmd):
        """Track the repository heads given a CommitCommand.

        :param cmd: the CommitCommand
        :return: the list of parents in terms of commit-ids
        """
        # First parent: explicit from clause, else the previous commit
        # on the same ref (if any).
        if cmd.from_ is not None:
            parents = [cmd.from_]
        else:
            previous = self.last_ids.get(cmd.ref)
            parents = [] if previous is None else [previous]
        parents += cmd.merges

        self.track_heads_for_ref(cmd.ref, cmd.id, parents)
        return parents

    def track_heads_for_ref(self, cmd_ref, cmd_id, parents=None):
        """Make cmd_id the head of cmd_ref, displacing its parents."""
        # Each parent stops being a head once it has a child.
        for parent in (parents or ()):
            self.heads.pop(parent, None)
        self.heads.setdefault(cmd_id, set()).add(cmd_ref)
        self.last_ids[cmd_ref] = cmd_id
        self.last_ref = cmd_ref
+
+
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/hgext3rd/fastimport/vendor/python_fastimport/tests/__init__.py	Tue Jan 19 22:56:34 2021 +0000
@@ -0,0 +1,39 @@
+# __init__.py -- The tests for python-fastimport
+# Copyright (C) 2010 Canonical, Ltd.
+#
+# This program is free software; you can redistribute it and/or
+# modify it under the terms of the GNU General Public License
+# as published by the Free Software Foundation; version 2
+# of the License or (at your option) any later version of
+# the License.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program.  If not, see <http://www.gnu.org/licenses/>.
+
+"""Tests for fastimport."""
+
+import unittest
+
+
def test_suite():
    """Return a TestSuite covering all fastimport test modules."""
    names = (
        'test_commands',
        'test_dates',
        'test_errors',
        'test_filter_processor',
        'test_info_processor',
        'test_helpers',
        'test_parser',
        )
    module_names = ['fastimport.tests.%s' % name for name in names]
    loader = unittest.TestLoader()
    result = unittest.TestSuite()
    result.addTests(loader.loadTestsFromNames(module_names))
    return result
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/hgext3rd/fastimport/vendor/python_fastimport/tests/test_commands.py	Tue Jan 19 22:56:34 2021 +0000
@@ -0,0 +1,471 @@
+# Copyright (C) 2009 Canonical Ltd
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program.  If not, see <http://www.gnu.org/licenses/>.
+
+"""Test how Commands are displayed"""
+
+from unittest import TestCase
+
+from fastimport.helpers import (
+    repr_bytes,
+    utf8_bytes_string,
+    )
+
+from fastimport import (
+    commands,
+    )
+
+
+class TestBlobDisplay(TestCase):
+
+    def test_blob(self):
+        c = commands.BlobCommand(b"1", b"hello world")
+        self.assertEqual(b"blob\nmark :1\ndata 11\nhello world", repr_bytes(c))
+
+    def test_blob_no_mark(self):
+        c = commands.BlobCommand(None, b"hello world")
+        self.assertEqual(b"blob\ndata 11\nhello world", repr_bytes(c))
+
+
+class TestCheckpointDisplay(TestCase):
+
+    def test_checkpoint(self):
+        c = commands.CheckpointCommand()
+        self.assertEqual(b'checkpoint', repr_bytes(c))
+
+
+class TestCommitDisplay(TestCase):
+
+    def test_commit(self):
+        # user tuple is (name, email, secs-since-epoch, secs-offset-from-utc)
+        committer = (b'Joe Wong', b'joe@example.com', 1234567890, -6 * 3600)
+        c = commands.CommitCommand(b"refs/heads/master", b"bbb", None, committer,
+            b"release v1.0", b":aaa", None, None)
+        self.assertEqual(
+            b"commit refs/heads/master\n"
+            b"mark :bbb\n"
+            b"committer Joe Wong <joe@example.com> 1234567890 -0600\n"
+            b"data 12\n"
+            b"release v1.0\n"
+            b"from :aaa",
+            repr_bytes(c))
+
+    def test_commit_unicode_committer(self):
+        # user tuple is (name, email, secs-since-epoch, secs-offset-from-utc)
+        name = u'\u013d\xf3r\xe9m \xcdp\u0161\xfam'
+
+        commit_utf8 = utf8_bytes_string(
+            u"commit refs/heads/master\n"
+            u"mark :bbb\n"
+            u"committer %s <test@example.com> 1234567890 -0600\n"
+            u"data 12\n"
+            u"release v1.0\n"
+            u"from :aaa" % (name,)
+        )
+
+        committer = (name, b'test@example.com', 1234567890, -6 * 3600)
+        c = commands.CommitCommand(b'refs/heads/master', b'bbb', None, committer,
+            b'release v1.0', b':aaa', None, None)
+
+        self.assertEqual(commit_utf8, repr_bytes(c))
+
+    def test_commit_no_mark(self):
+        # user tuple is (name, email, secs-since-epoch, secs-offset-from-utc)
+        committer = (b'Joe Wong', b'joe@example.com', 1234567890, -6 * 3600)
+        c = commands.CommitCommand(b'refs/heads/master', None, None, committer,
+           b'release v1.0', b':aaa', None, None)
+        self.assertEqual(
+            b"commit refs/heads/master\n"
+            b"committer Joe Wong <joe@example.com> 1234567890 -0600\n"
+            b"data 12\n"
+            b"release v1.0\n"
+            b"from :aaa",
+            repr_bytes(c))
+
+    def test_commit_no_from(self):
+        # user tuple is (name, email, secs-since-epoch, secs-offset-from-utc)
+        committer = (b'Joe Wong', b'joe@example.com', 1234567890, -6 * 3600)
+        c = commands.CommitCommand(b"refs/heads/master", b"bbb", None, committer,
+            b"release v1.0", None, None, None)
+        self.assertEqual(
+            b"commit refs/heads/master\n"
+            b"mark :bbb\n"
+            b"committer Joe Wong <joe@example.com> 1234567890 -0600\n"
+            b"data 12\n"
+            b"release v1.0",
+            repr_bytes(c))
+
+    def test_commit_with_author(self):
+        # user tuple is (name, email, secs-since-epoch, secs-offset-from-utc)
+        author = (b'Sue Wong', b'sue@example.com', 1234565432, -6 * 3600)
+        committer = (b'Joe Wong', b'joe@example.com', 1234567890, -6 * 3600)
+        c = commands.CommitCommand(b'refs/heads/master', b'bbb', author,
+            committer, b'release v1.0', b':aaa', None, None)
+        self.assertEqual(
+            b"commit refs/heads/master\n"
+            b"mark :bbb\n"
+            b"author Sue Wong <sue@example.com> 1234565432 -0600\n"
+            b"committer Joe Wong <joe@example.com> 1234567890 -0600\n"
+            b"data 12\n"
+            b"release v1.0\n"
+            b"from :aaa",
+            repr_bytes(c))
+
+    def test_commit_with_merges(self):
+        # user tuple is (name, email, secs-since-epoch, secs-offset-from-utc)
+        committer = (b'Joe Wong', b'joe@example.com', 1234567890, -6 * 3600)
+        c = commands.CommitCommand(b"refs/heads/master", b"ddd", None, committer,
+                b'release v1.0', b":aaa", [b':bbb', b':ccc'], None)
+        self.assertEqual(
+            b"commit refs/heads/master\n"
+            b"mark :ddd\n"
+            b"committer Joe Wong <joe@example.com> 1234567890 -0600\n"
+            b"data 12\n"
+            b"release v1.0\n"
+            b"from :aaa\n"
+            b"merge :bbb\n"
+            b"merge :ccc",
+            repr_bytes(c))
+
+    def test_commit_with_filecommands(self):
+        file_cmds = iter([
+            commands.FileDeleteCommand(b'readme.txt'),
+            commands.FileModifyCommand(b'NEWS', 0o100644, None,
+                b'blah blah blah'),
+            ])
+        # user tuple is (name, email, secs-since-epoch, secs-offset-from-utc)
+        committer = (b'Joe Wong', b'joe@example.com', 1234567890, -6 * 3600)
+        c = commands.CommitCommand(b'refs/heads/master', b'bbb', None, committer,
+            b'release v1.0', b':aaa', None, file_cmds)
+        self.assertEqual(
+            b"commit refs/heads/master\n"
+            b"mark :bbb\n"
+            b"committer Joe Wong <joe@example.com> 1234567890 -0600\n"
+            b"data 12\n"
+            b"release v1.0\n"
+            b"from :aaa\n"
+            b"D readme.txt\n"
+            b"M 644 inline NEWS\n"
+            b"data 14\n"
+            b"blah blah blah",
+            repr_bytes(c))
+
+    def test_commit_with_more_authors(self):
+        # user tuple is (name, email, secs-since-epoch, secs-offset-from-utc)
+        author = (b'Sue Wong', b'sue@example.com', 1234565432, -6 * 3600)
+        committer = (b'Joe Wong', b'joe@example.com', 1234567890, -6 * 3600)
+        more_authors = [
+            (b'Al Smith', b'al@example.com', 1234565432, -6 * 3600),
+            (b'Bill Jones', b'bill@example.com', 1234565432, -6 * 3600),
+        ]
+        c = commands.CommitCommand(b'refs/heads/master', b'bbb', author,
+            committer, b'release v1.0', b':aaa', None, None,
+            more_authors=more_authors)
+        self.assertEqual(
+            b"commit refs/heads/master\n"
+            b"mark :bbb\n"
+            b"author Sue Wong <sue@example.com> 1234565432 -0600\n"
+            b"author Al Smith <al@example.com> 1234565432 -0600\n"
+            b"author Bill Jones <bill@example.com> 1234565432 -0600\n"
+            b"committer Joe Wong <joe@example.com> 1234567890 -0600\n"
+            b"data 12\n"
+            b"release v1.0\n"
+            b"from :aaa",
+            repr_bytes(c))
+
+    def test_commit_with_properties(self):
+        # user tuple is (name, email, secs-since-epoch, secs-offset-from-utc)
+        committer = (b'Joe Wong', b'joe@example.com', 1234567890, -6 * 3600)
+        properties = {
+            u'greeting':  u'hello',
+            u'planet':    u'world',
+            }
+        c = commands.CommitCommand(b'refs/heads/master', b'bbb', None,
+            committer, b'release v1.0', b':aaa', None, None,
+            properties=properties)
+        self.assertEqual(
+            b"commit refs/heads/master\n"
+            b"mark :bbb\n"
+            b"committer Joe Wong <joe@example.com> 1234567890 -0600\n"
+            b"data 12\n"
+            b"release v1.0\n"
+            b"from :aaa\n"
+            b"property greeting 5 hello\n"
+            b"property planet 5 world",
+            repr_bytes(c))
+
+    def test_commit_with_int_mark(self):
+        # user tuple is (name, email, secs-since-epoch, secs-offset-from-utc)
+        committer = (b'Joe Wong', b'joe@example.com', 1234567890, -6 * 3600)
+        properties = {
+            u'greeting':  u'hello',
+            u'planet':    u'world',
+            }
+        c = commands.CommitCommand(b'refs/heads/master', 123, None,
+            committer, b'release v1.0', b':aaa', None, None,
+            properties=properties)
+        self.assertEqual(
+            b"commit refs/heads/master\n"
+            b"mark :123\n"
+            b"committer Joe Wong <joe@example.com> 1234567890 -0600\n"
+            b"data 12\n"
+            b"release v1.0\n"
+            b"from :aaa\n"
+            b"property greeting 5 hello\n"
+            b"property planet 5 world",
+            repr_bytes(c))
+
+class TestCommitCopy(TestCase):
+
+    def setUp(self):
+        super(TestCommitCopy, self).setUp()
+        file_cmds = iter([
+            commands.FileDeleteCommand(b'readme.txt'),
+            commands.FileModifyCommand(b'NEWS', 0o100644, None, b'blah blah blah'),
+        ])
+
+        committer = (b'Joe Wong', b'joe@example.com', 1234567890, -6 * 3600)
+        self.c = commands.CommitCommand(
+            b'refs/heads/master', b'bbb', None, committer,
+            b'release v1.0', b':aaa', None, file_cmds)
+
+    def test_simple_copy(self):
+        c2 = self.c.copy()
+
+        self.assertFalse(self.c is c2)
+        self.assertEqual(repr_bytes(self.c), repr_bytes(c2))
+
+    def test_replace_attr(self):
+        c2 = self.c.copy(mark=b'ccc')
+        self.assertEqual(
+            repr_bytes(self.c).replace(b'mark :bbb', b'mark :ccc'),
+            repr_bytes(c2)
+        )
+
+    def test_invalid_attribute(self):
+        self.assertRaises(TypeError, self.c.copy, invalid=True)
+
+class TestFeatureDisplay(TestCase):
+
+    def test_feature(self):
+        c = commands.FeatureCommand(b"dwim")
+        self.assertEqual(b"feature dwim", repr_bytes(c))
+
+    def test_feature_with_value(self):
+        c = commands.FeatureCommand(b"dwim", b"please")
+        self.assertEqual(b"feature dwim=please", repr_bytes(c))
+
+
+class TestProgressDisplay(TestCase):
+
+    def test_progress(self):
+        c = commands.ProgressCommand(b"doing foo")
+        self.assertEqual(b"progress doing foo", repr_bytes(c))
+
+
+class TestResetDisplay(TestCase):
+
+    def test_reset(self):
+        c = commands.ResetCommand(b"refs/tags/v1.0", b":xxx")
+        self.assertEqual(b"reset refs/tags/v1.0\nfrom :xxx\n", repr_bytes(c))
+
+    def test_reset_no_from(self):
+        c = commands.ResetCommand(b'refs/remotes/origin/master', None)
+        self.assertEqual(b'reset refs/remotes/origin/master', repr_bytes(c))
+
+
+class TestTagDisplay(TestCase):
+
+    def test_tag(self):
+        # tagger tuple is (name, email, secs-since-epoch, secs-offset-from-utc)
+        tagger = (b'Joe Wong', b'joe@example.com', 1234567890, -6 * 3600)
+        c = commands.TagCommand(b'refs/tags/v1.0', b':xxx', tagger, b'create v1.0')
+        self.assertEqual(
+            b"tag refs/tags/v1.0\n"
+            b"from :xxx\n"
+            b"tagger Joe Wong <joe@example.com> 1234567890 -0600\n"
+            b"data 11\n"
+            b"create v1.0",
+            repr_bytes(c))
+
+    def test_tag_no_from(self):
+        tagger = (b'Joe Wong', b'joe@example.com', 1234567890, -6 * 3600)
+        c = commands.TagCommand(b'refs/tags/v1.0', None, tagger, b'create v1.0')
+        self.assertEqual(
+            b"tag refs/tags/v1.0\n"
+            b"tagger Joe Wong <joe@example.com> 1234567890 -0600\n"
+            b"data 11\n"
+            b"create v1.0",
+            repr_bytes(c))
+
+
+class TestFileModifyDisplay(TestCase):
+
+    def test_filemodify_file(self):
+        c = commands.FileModifyCommand(b'foo/bar', 0o100644, b':23', None)
+        self.assertEqual(b'M 644 :23 foo/bar', repr_bytes(c))
+
+    def test_filemodify_file_executable(self):
+        c = commands.FileModifyCommand(b'foo/bar', 0o100755, b':23', None)
+        self.assertEqual(b'M 755 :23 foo/bar', repr_bytes(c))
+
+    def test_filemodify_file_internal(self):
+        c = commands.FileModifyCommand(b'foo/bar', 0o100644, None,
+            b'hello world')
+        self.assertEqual(b'M 644 inline foo/bar\ndata 11\nhello world', repr_bytes(c))
+
+    def test_filemodify_symlink(self):
+        c = commands.FileModifyCommand(b'foo/bar', 0o120000, None, b'baz')
+        self.assertEqual(b'M 120000 inline foo/bar\ndata 3\nbaz', repr_bytes(c))
+
+    def test_filemodify_treeref(self):
+        c = commands.FileModifyCommand(b'tree-info', 0o160000,
+            b'revision-id-info', None)
+        self.assertEqual(b'M 160000 revision-id-info tree-info', repr_bytes(c))
+
+
+class TestFileDeleteDisplay(TestCase):
+
+    def test_filedelete(self):
+        c = commands.FileDeleteCommand(b'foo/bar')
+        self.assertEqual(b'D foo/bar', repr_bytes(c))
+
+
+class TestFileCopyDisplay(TestCase):
+
+    def test_filecopy(self):
+        c = commands.FileCopyCommand(b'foo/bar', b'foo/baz')
+        self.assertEqual(b'C foo/bar foo/baz', repr_bytes(c))
+
+    def test_filecopy_quoted(self):
+        # Check the first path is quoted if it contains spaces
+        c = commands.FileCopyCommand(b'foo/b a r', b'foo/b a z')
+        self.assertEqual(b'C "foo/b a r" foo/b a z', repr_bytes(c))
+
+
+class TestFileRenameDisplay(TestCase):
+
+    def test_filerename(self):
+        c = commands.FileRenameCommand(b'foo/bar', b'foo/baz')
+        self.assertEqual(b'R foo/bar foo/baz', repr_bytes(c))
+
+    def test_filerename_quoted(self):
+        # Check the first path is quoted if it contains spaces
+        c = commands.FileRenameCommand(b'foo/b a r', b'foo/b a z')
+        self.assertEqual(b'R "foo/b a r" foo/b a z', repr_bytes(c))
+
+
+class TestFileDeleteAllDisplay(TestCase):
+
+    def test_filedeleteall(self):
+        c = commands.FileDeleteAllCommand()
+        self.assertEqual(b'deleteall', repr_bytes(c))
+
+class TestNotesDisplay(TestCase):
+
+    def test_noteonly(self):
+        c = commands.NoteModifyCommand(b'foo', b'A basic note')
+        self.assertEqual(b'N inline :foo\ndata 12\nA basic note', repr_bytes(c))
+
+    def test_notecommit(self):
+        committer = (b'Ed Mund', b'ed@example.org', 1234565432, 0)
+
+        commits = [
+            commands.CommitCommand(
+                ref=b'refs/heads/master',
+                mark=b'1',
+                author=committer,
+                committer=committer,
+                message=b'test\n',
+                from_=None,
+                merges=[],
+                file_iter=[
+                    commands.FileModifyCommand(b'bar', 0o100644, None, b'')
+                ]),
+            commands.CommitCommand(
+                ref=b'refs/notes/commits',
+                mark=None,
+                author=None,
+                committer=committer,
+                message=b"Notes added by 'git notes add'\n",
+                from_=None,
+                merges=[],
+                file_iter=[
+                    commands.NoteModifyCommand(b'1', b'Test note\n')
+                ]),
+            commands.CommitCommand(
+                ref=b'refs/notes/test',
+                mark=None,
+                author=None,
+                committer=committer,
+                message=b"Notes added by 'git notes add'\n",
+                from_=None,
+                merges=[],
+                file_iter=[
+                    commands.NoteModifyCommand(b'1', b'Test test\n')
+                ])
+        ]
+
+        self.assertEqual(
+            b"""commit refs/heads/master
+mark :1
+author Ed Mund <ed@example.org> 1234565432 +0000
+committer Ed Mund <ed@example.org> 1234565432 +0000
+data 5
+test
+
+M 644 inline bar
+data 0
+commit refs/notes/commits
+committer Ed Mund <ed@example.org> 1234565432 +0000
+data 31
+Notes added by 'git notes add'
+
+N inline :1
+data 10
+Test note
+commit refs/notes/test
+committer Ed Mund <ed@example.org> 1234565432 +0000
+data 31
+Notes added by 'git notes add'
+
+N inline :1
+data 10
+Test test
+""", b''.join([repr_bytes(s) for s in commits]))
+
+
+class TestPathChecking(TestCase):
+
+    def test_filemodify_path_checking(self):
+        self.assertRaises(ValueError, commands.FileModifyCommand, b'',
+            0o100644, None, b'text')
+        self.assertRaises(ValueError, commands.FileModifyCommand, None,
+            0o100644, None, b'text')
+
+    def test_filedelete_path_checking(self):
+        self.assertRaises(ValueError, commands.FileDeleteCommand, b'')
+        self.assertRaises(ValueError, commands.FileDeleteCommand, None)
+
+    def test_filerename_path_checking(self):
+        self.assertRaises(ValueError, commands.FileRenameCommand, b'', b'foo')
+        self.assertRaises(ValueError, commands.FileRenameCommand, None, b'foo')
+        self.assertRaises(ValueError, commands.FileRenameCommand, b'foo', b'')
+        self.assertRaises(ValueError, commands.FileRenameCommand, b'foo', None)
+
+    def test_filecopy_path_checking(self):
+        self.assertRaises(ValueError, commands.FileCopyCommand, b'', b'foo')
+        self.assertRaises(ValueError, commands.FileCopyCommand, None, b'foo')
+        self.assertRaises(ValueError, commands.FileCopyCommand, b'foo', b'')
+        self.assertRaises(ValueError, commands.FileCopyCommand, b'foo', None)
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/hgext3rd/fastimport/vendor/python_fastimport/tests/test_dates.py	Tue Jan 19 22:56:34 2021 +0000
@@ -0,0 +1,34 @@
+# Copyright (C) 2012 Canonical Ltd
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program.  If not, see <http://www.gnu.org/licenses/>.
+
+"""Test parsing of dates."""
+
+from unittest import TestCase
+
+from fastimport import (
+    dates,
+    )
+
+class ParseTzTests(TestCase):
+
+    def test_parse_tz_utc(self):
+        self.assertEqual(0, dates.parse_tz(b'+0000'))
+        self.assertEqual(0, dates.parse_tz(b'-0000'))
+
+    def test_parse_tz_cet(self):
+        self.assertEqual(3600, dates.parse_tz(b'+0100'))
+
+    def test_parse_tz_odd(self):
+        self.assertEqual(1864800, dates.parse_tz(b'+51800'))
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/hgext3rd/fastimport/vendor/python_fastimport/tests/test_errors.py	Tue Jan 19 22:56:34 2021 +0000
@@ -0,0 +1,76 @@
+# Copyright (C) 2008 Canonical Ltd
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program.  If not, see <http://www.gnu.org/licenses/>.
+
+"""Test the Import errors"""
+from unittest import TestCase
+
+from fastimport import (
+    errors,
+    )
+
+
+class TestErrors(TestCase):
+
+    def test_MissingBytes(self):
+        e = errors.MissingBytes(99, 10, 8)
+        self.assertEqual("line 99: Unexpected EOF - expected 10 bytes, found 8",
+            str(e))
+
+    def test_MissingTerminator(self):
+        e = errors.MissingTerminator(99, '---')
+        self.assertEqual("line 99: Unexpected EOF - expected '---' terminator",
+            str(e))
+
+    def test_InvalidCommand(self):
+        e = errors.InvalidCommand(99, 'foo')
+        self.assertEqual("line 99: Invalid command 'foo'",
+            str(e))
+
+    def test_MissingSection(self):
+        e = errors.MissingSection(99, 'foo', 'bar')
+        self.assertEqual("line 99: Command foo is missing section bar",
+            str(e))
+
+    def test_BadFormat(self):
+        e = errors.BadFormat(99, 'foo', 'bar', 'xyz')
+        self.assertEqual("line 99: Bad format for section bar in "
+            "command foo: found 'xyz'",
+            str(e))
+
+    def test_InvalidTimezone(self):
+        e = errors.InvalidTimezone(99, 'aa:bb')
+        self.assertEqual('aa:bb', e.timezone)
+        self.assertEqual('', e.reason)
+        self.assertEqual("line 99: Timezone 'aa:bb' could not be converted.",
+            str(e))
+        e = errors.InvalidTimezone(99, 'aa:bb', 'Non-numeric hours')
+        self.assertEqual('aa:bb', e.timezone)
+        self.assertEqual(' Non-numeric hours', e.reason)
+        self.assertEqual("line 99: Timezone 'aa:bb' could not be converted."
+             " Non-numeric hours",
+             str(e))
+
+    def test_UnknownDateFormat(self):
+        e = errors.UnknownDateFormat('aaa')
+        self.assertEqual("Unknown date format 'aaa'", str(e))
+
+    def test_MissingHandler(self):
+        e = errors.MissingHandler('foo')
+        self.assertEqual("Missing handler for command foo", str(e))
+
+    def test_UnknownFeature(self):
+        e = errors.UnknownFeature('aaa')
+        self.assertEqual("Unknown feature 'aaa' - try a later importer or "
+            "an earlier data format", str(e))
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/hgext3rd/fastimport/vendor/python_fastimport/tests/test_filter_processor.py	Tue Jan 19 22:56:34 2021 +0000
@@ -0,0 +1,1107 @@
+# Copyright (C) 2009 Canonical Ltd
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program.  If not, see <http://www.gnu.org/licenses/>.
+
+"""Test FilterProcessor"""
+from io import BytesIO
+
+from unittest import TestCase
+
+from fastimport import (
+    parser,
+    )
+
+from fastimport.processors import (
+    filter_processor,
+    )
+
+
+# A sample input stream containing all (top level) import commands
+_SAMPLE_ALL = \
+b"""blob
+mark :1
+data 4
+foo
+commit refs/heads/master
+mark :2
+committer Joe <joe@example.com> 1234567890 +1000
+data 14
+Initial import
+M 644 :1 COPYING
+checkpoint
+progress first import done
+reset refs/remote/origin/master
+from :2
+tag v0.1
+from :2
+tagger Joe <joe@example.com> 1234567890 +1000
+data 12
+release v0.1
+"""
+
+
+# A sample input stream creating the following tree:
+#
+#  NEWS
+#  doc/README.txt
+#  doc/index.txt
+_SAMPLE_WITH_DIR = \
+b"""blob
+mark :1
+data 9
+Welcome!
+commit refs/heads/master
+mark :100
+committer a <b@c> 1234798653 +0000
+data 4
+test
+M 644 :1 doc/README.txt
+blob
+mark :2
+data 17
+Life
+is
+good ...
+commit refs/heads/master
+mark :101
+committer a <b@c> 1234798653 +0000
+data 8
+test
+ing
+from :100
+M 644 :2 NEWS
+blob
+mark :3
+data 19
+Welcome!
+my friend
+blob
+mark :4
+data 11
+== Docs ==
+commit refs/heads/master
+mark :102
+committer d <b@c> 1234798653 +0000
+data 8
+test
+ing
+from :101
+M 644 :3 doc/README.txt
+M 644 :4 doc/index.txt
+"""
+
+class TestCaseWithFiltering(TestCase):
+
+    def assertFiltering(self, input_stream, params, expected):
+        outf = BytesIO()
+        proc = filter_processor.FilterProcessor(
+            params=params)
+        proc.outf = outf
+        s = BytesIO(input_stream)
+        p = parser.ImportParser(s)
+        proc.process(p.iter_commands)
+        out = outf.getvalue()
+        self.assertEqual(expected, out)
+
+class TestNoFiltering(TestCaseWithFiltering):
+
+    def test_params_not_given(self):
+        self.assertFiltering(_SAMPLE_ALL, None, _SAMPLE_ALL)
+
+    def test_params_are_none(self):
+        params = {b'include_paths': None, b'exclude_paths': None}
+        self.assertFiltering(_SAMPLE_ALL, params, _SAMPLE_ALL)
+
+
+class TestIncludePaths(TestCaseWithFiltering):
+
+    def test_file_in_root(self):
+        # Things to note:
+        # * only referenced blobs are retained
+        # * from clause is dropped from the first command
+        params = {b'include_paths': [b'NEWS']}
+        self.assertFiltering(_SAMPLE_WITH_DIR, params, \
+b"""blob
+mark :2
+data 17
+Life
+is
+good ...
+commit refs/heads/master
+mark :101
+committer a <b@c> 1234798653 +0000
+data 8
+test
+ing
+M 644 :2 NEWS
+""")
+
+    def test_file_in_subdir(self):
+        #  Additional things to note:
+        # * new root: path is now index.txt, not doc/index.txt
+        # * other files changed in matching commits are excluded
+        params = {b'include_paths': [b'doc/index.txt']}
+        self.assertFiltering(_SAMPLE_WITH_DIR, params, \
+b"""blob
+mark :4
+data 11
+== Docs ==
+commit refs/heads/master
+mark :102
+committer d <b@c> 1234798653 +0000
+data 8
+test
+ing
+M 644 :4 index.txt
+""")
+
+    def test_file_with_changes(self):
+        #  Additional things to note:
+        # * from updated to reference parents in the output
+        params = {b'include_paths': [b'doc/README.txt']}
+        self.assertFiltering(_SAMPLE_WITH_DIR, params, \
+b"""blob
+mark :1
+data 9
+Welcome!
+commit refs/heads/master
+mark :100
+committer a <b@c> 1234798653 +0000
+data 4
+test
+M 644 :1 README.txt
+blob
+mark :3
+data 19
+Welcome!
+my friend
+commit refs/heads/master
+mark :102
+committer d <b@c> 1234798653 +0000
+data 8
+test
+ing
+from :100
+M 644 :3 README.txt
+""")
+
+    def test_subdir(self):
+        params = {b'include_paths': [b'doc/']}
+        self.assertFiltering(_SAMPLE_WITH_DIR, params, \
+b"""blob
+mark :1
+data 9
+Welcome!
+commit refs/heads/master
+mark :100
+committer a <b@c> 1234798653 +0000
+data 4
+test
+M 644 :1 README.txt
+blob
+mark :3
+data 19
+Welcome!
+my friend
+blob
+mark :4
+data 11
+== Docs ==
+commit refs/heads/master
+mark :102
+committer d <b@c> 1234798653 +0000
+data 8
+test
+ing
+from :100
+M 644 :3 README.txt
+M 644 :4 index.txt
+""")
+
+    def test_multiple_files_in_subdir(self):
+        # The new root should be the subdirectory
+        params = {b'include_paths': [b'doc/README.txt', b'doc/index.txt']}
+        self.assertFiltering(_SAMPLE_WITH_DIR, params, \
+b"""blob
+mark :1
+data 9
+Welcome!
+commit refs/heads/master
+mark :100
+committer a <b@c> 1234798653 +0000
+data 4
+test
+M 644 :1 README.txt
+blob
+mark :3
+data 19
+Welcome!
+my friend
+blob
+mark :4
+data 11
+== Docs ==
+commit refs/heads/master
+mark :102
+committer d <b@c> 1234798653 +0000
+data 8
+test
+ing
+from :100
+M 644 :3 README.txt
+M 644 :4 index.txt
+""")
+
+
+class TestExcludePaths(TestCaseWithFiltering):
+
+    def test_file_in_root(self):
+        params = {b'exclude_paths': [b'NEWS']}
+        self.assertFiltering(_SAMPLE_WITH_DIR, params, \
+b"""blob
+mark :1
+data 9
+Welcome!
+commit refs/heads/master
+mark :100
+committer a <b@c> 1234798653 +0000
+data 4
+test
+M 644 :1 doc/README.txt
+blob
+mark :3
+data 19
+Welcome!
+my friend
+blob
+mark :4
+data 11
+== Docs ==
+commit refs/heads/master
+mark :102
+committer d <b@c> 1234798653 +0000
+data 8
+test
+ing
+from :100
+M 644 :3 doc/README.txt
+M 644 :4 doc/index.txt
+""")
+
+    def test_file_in_subdir(self):
+        params = {b'exclude_paths': [b'doc/README.txt']}
+        self.assertFiltering(_SAMPLE_WITH_DIR, params, \
+b"""blob
+mark :2
+data 17
+Life
+is
+good ...
+commit refs/heads/master
+mark :101
+committer a <b@c> 1234798653 +0000
+data 8
+test
+ing
+M 644 :2 NEWS
+blob
+mark :4
+data 11
+== Docs ==
+commit refs/heads/master
+mark :102
+committer d <b@c> 1234798653 +0000
+data 8
+test
+ing
+from :101
+M 644 :4 doc/index.txt
+""")
+
+    def test_subdir(self):
+        params = {b'exclude_paths': [b'doc/']}
+        self.assertFiltering(_SAMPLE_WITH_DIR, params, \
+b"""blob
+mark :2
+data 17
+Life
+is
+good ...
+commit refs/heads/master
+mark :101
+committer a <b@c> 1234798653 +0000
+data 8
+test
+ing
+M 644 :2 NEWS
+""")
+
+    def test_multple_files(self):
+        params = {b'exclude_paths': [b'doc/index.txt', b'NEWS']}
+        self.assertFiltering(_SAMPLE_WITH_DIR, params, \
+b"""blob
+mark :1
+data 9
+Welcome!
+commit refs/heads/master
+mark :100
+committer a <b@c> 1234798653 +0000
+data 4
+test
+M 644 :1 doc/README.txt
+blob
+mark :3
+data 19
+Welcome!
+my friend
+commit refs/heads/master
+mark :102
+committer d <b@c> 1234798653 +0000
+data 8
+test
+ing
+from :100
+M 644 :3 doc/README.txt
+""")
+
+
+class TestIncludeAndExcludePaths(TestCaseWithFiltering):
+
+    def test_included_dir_and_excluded_file(self):
+        params = {b'include_paths': [b'doc/'], b'exclude_paths': [b'doc/index.txt']}
+        self.assertFiltering(_SAMPLE_WITH_DIR, params, \
+b"""blob
+mark :1
+data 9
+Welcome!
+commit refs/heads/master
+mark :100
+committer a <b@c> 1234798653 +0000
+data 4
+test
+M 644 :1 README.txt
+blob
+mark :3
+data 19
+Welcome!
+my friend
+commit refs/heads/master
+mark :102
+committer d <b@c> 1234798653 +0000
+data 8
+test
+ing
+from :100
+M 644 :3 README.txt
+""")
+
+
+# A sample input stream creating the following tree:
+#
+#  NEWS
+#  doc/README.txt
+#  doc/index.txt
+#
+# It then renames doc/README.txt => doc/README
+_SAMPLE_WITH_RENAME_INSIDE = _SAMPLE_WITH_DIR + \
+b"""commit refs/heads/master
+mark :103
+committer d <b@c> 1234798653 +0000
+data 10
+move intro
+from :102
+R doc/README.txt doc/README
+"""
+
+# A sample input stream creating the following tree:
+#
+#  NEWS
+#  doc/README.txt
+#  doc/index.txt
+#
+# It then renames doc/README.txt => README
+_SAMPLE_WITH_RENAME_TO_OUTSIDE = _SAMPLE_WITH_DIR + \
+b"""commit refs/heads/master
+mark :103
+committer d <b@c> 1234798653 +0000
+data 10
+move intro
+from :102
+R doc/README.txt README
+"""
+
+# A sample input stream creating the following tree:
+#
+#  NEWS
+#  doc/README.txt
+#  doc/index.txt
+#
+# It then renames NEWS => doc/NEWS
+_SAMPLE_WITH_RENAME_TO_INSIDE = _SAMPLE_WITH_DIR + \
+b"""commit refs/heads/master
+mark :103
+committer d <b@c> 1234798653 +0000
+data 10
+move intro
+from :102
+R NEWS doc/NEWS
+"""
+
+class TestIncludePathsWithRenames(TestCaseWithFiltering):
+    """Filtering with include_paths must rewrite or drop 'R' (rename) commands."""
+
+    def test_rename_all_inside(self):
+        # These rename commands ought to be kept but adjusted for the new root
+        params = {b'include_paths': [b'doc/']}
+        self.assertFiltering(_SAMPLE_WITH_RENAME_INSIDE, params, \
+b"""blob
+mark :1
+data 9
+Welcome!
+commit refs/heads/master
+mark :100
+committer a <b@c> 1234798653 +0000
+data 4
+test
+M 644 :1 README.txt
+blob
+mark :3
+data 19
+Welcome!
+my friend
+blob
+mark :4
+data 11
+== Docs ==
+commit refs/heads/master
+mark :102
+committer d <b@c> 1234798653 +0000
+data 8
+test
+ing
+from :100
+M 644 :3 README.txt
+M 644 :4 index.txt
+commit refs/heads/master
+mark :103
+committer d <b@c> 1234798653 +0000
+data 10
+move intro
+from :102
+R README.txt README
+""")
+
+    def test_rename_to_outside(self):
+        # These rename commands become deletes
+        params = {b'include_paths': [b'doc/']}
+        self.assertFiltering(_SAMPLE_WITH_RENAME_TO_OUTSIDE, params, \
+b"""blob
+mark :1
+data 9
+Welcome!
+commit refs/heads/master
+mark :100
+committer a <b@c> 1234798653 +0000
+data 4
+test
+M 644 :1 README.txt
+blob
+mark :3
+data 19
+Welcome!
+my friend
+blob
+mark :4
+data 11
+== Docs ==
+commit refs/heads/master
+mark :102
+committer d <b@c> 1234798653 +0000
+data 8
+test
+ing
+from :100
+M 644 :3 README.txt
+M 644 :4 index.txt
+commit refs/heads/master
+mark :103
+committer d <b@c> 1234798653 +0000
+data 10
+move intro
+from :102
+D README.txt
+""")
+
+    def test_rename_to_inside(self):
+        # This ought to create a new file but doesn't yet
+        params = {b'include_paths': [b'doc/']}
+        self.assertFiltering(_SAMPLE_WITH_RENAME_TO_INSIDE, params, \
+b"""blob
+mark :1
+data 9
+Welcome!
+commit refs/heads/master
+mark :100
+committer a <b@c> 1234798653 +0000
+data 4
+test
+M 644 :1 README.txt
+blob
+mark :3
+data 19
+Welcome!
+my friend
+blob
+mark :4
+data 11
+== Docs ==
+commit refs/heads/master
+mark :102
+committer d <b@c> 1234798653 +0000
+data 8
+test
+ing
+from :100
+M 644 :3 README.txt
+M 644 :4 index.txt
+""")
+
+
+# A sample input stream creating the following tree:
+#
+#  NEWS
+#  doc/README.txt
+#  doc/index.txt
+#
+# It then copies doc/README.txt => doc/README
+# (These three constants mirror the rename samples above, but with 'C'
+# file-copy commands instead of 'R'.)
+_SAMPLE_WITH_COPY_INSIDE = _SAMPLE_WITH_DIR + \
+b"""commit refs/heads/master
+mark :103
+committer d <b@c> 1234798653 +0000
+data 10
+move intro
+from :102
+C doc/README.txt doc/README
+"""
+
+# A sample input stream creating the following tree:
+#
+#  NEWS
+#  doc/README.txt
+#  doc/index.txt
+#
+# It then copies doc/README.txt => README
+_SAMPLE_WITH_COPY_TO_OUTSIDE = _SAMPLE_WITH_DIR + \
+b"""commit refs/heads/master
+mark :103
+committer d <b@c> 1234798653 +0000
+data 10
+move intro
+from :102
+C doc/README.txt README
+"""
+
+# A sample input stream creating the following tree:
+#
+#  NEWS
+#  doc/README.txt
+#  doc/index.txt
+#
+# It then copies NEWS => doc/NEWS
+_SAMPLE_WITH_COPY_TO_INSIDE = _SAMPLE_WITH_DIR + \
+b"""commit refs/heads/master
+mark :103
+committer d <b@c> 1234798653 +0000
+data 10
+move intro
+from :102
+C NEWS doc/NEWS
+"""
+
+
+class TestIncludePathsWithCopies(TestCaseWithFiltering):
+    """Filtering with include_paths must rewrite or drop 'C' (copy) commands."""
+
+    def test_copy_all_inside(self):
+        # These copy commands ought to be kept but adjusted for the new root
+        params = {b'include_paths': [b'doc/']}
+        self.assertFiltering(_SAMPLE_WITH_COPY_INSIDE, params, \
+b"""blob
+mark :1
+data 9
+Welcome!
+commit refs/heads/master
+mark :100
+committer a <b@c> 1234798653 +0000
+data 4
+test
+M 644 :1 README.txt
+blob
+mark :3
+data 19
+Welcome!
+my friend
+blob
+mark :4
+data 11
+== Docs ==
+commit refs/heads/master
+mark :102
+committer d <b@c> 1234798653 +0000
+data 8
+test
+ing
+from :100
+M 644 :3 README.txt
+M 644 :4 index.txt
+commit refs/heads/master
+mark :103
+committer d <b@c> 1234798653 +0000
+data 10
+move intro
+from :102
+C README.txt README
+""")
+
+    def test_copy_to_outside(self):
+        # This can be ignored
+        params = {b'include_paths': [b'doc/']}
+        self.assertFiltering(_SAMPLE_WITH_COPY_TO_OUTSIDE, params, \
+b"""blob
+mark :1
+data 9
+Welcome!
+commit refs/heads/master
+mark :100
+committer a <b@c> 1234798653 +0000
+data 4
+test
+M 644 :1 README.txt
+blob
+mark :3
+data 19
+Welcome!
+my friend
+blob
+mark :4
+data 11
+== Docs ==
+commit refs/heads/master
+mark :102
+committer d <b@c> 1234798653 +0000
+data 8
+test
+ing
+from :100
+M 644 :3 README.txt
+M 644 :4 index.txt
+""")
+
+    def test_copy_to_inside(self):
+        # This ought to create a new file but doesn't yet
+        params = {b'include_paths': [b'doc/']}
+        self.assertFiltering(_SAMPLE_WITH_COPY_TO_INSIDE, params, \
+b"""blob
+mark :1
+data 9
+Welcome!
+commit refs/heads/master
+mark :100
+committer a <b@c> 1234798653 +0000
+data 4
+test
+M 644 :1 README.txt
+blob
+mark :3
+data 19
+Welcome!
+my friend
+blob
+mark :4
+data 11
+== Docs ==
+commit refs/heads/master
+mark :102
+committer d <b@c> 1234798653 +0000
+data 8
+test
+ing
+from :100
+M 644 :3 README.txt
+M 644 :4 index.txt
+""")
+
+
+# A sample input stream with deleteall's creating the following tree:
+#
+#  NEWS
+#  doc/README.txt
+#  doc/index.txt
+# (Each commit begins with 'deleteall', wiping the tree before re-adding
+# its files.)
+_SAMPLE_WITH_DELETEALL = \
+b"""blob
+mark :1
+data 9
+Welcome!
+commit refs/heads/master
+mark :100
+committer a <b@c> 1234798653 +0000
+data 4
+test
+deleteall
+M 644 :1 doc/README.txt
+blob
+mark :3
+data 19
+Welcome!
+my friend
+blob
+mark :4
+data 11
+== Docs ==
+commit refs/heads/master
+mark :102
+committer d <b@c> 1234798653 +0000
+data 8
+test
+ing
+from :100
+deleteall
+M 644 :3 doc/README.txt
+M 644 :4 doc/index.txt
+"""
+
+
+class TestIncludePathsWithDeleteAll(TestCaseWithFiltering):
+    """'deleteall' commands must survive filtering on include_paths."""
+
+    def test_deleteall(self):
+        # Only doc/index.txt is kept; the first commit (which never touches
+        # it) is dropped entirely, and the kept path is rebased to the root.
+        params = {b'include_paths': [b'doc/index.txt']}
+        self.assertFiltering(_SAMPLE_WITH_DELETEALL, params, \
+b"""blob
+mark :4
+data 11
+== Docs ==
+commit refs/heads/master
+mark :102
+committer d <b@c> 1234798653 +0000
+data 8
+test
+ing
+from :100
+deleteall
+M 644 :4 index.txt
+""")
+
+
+# _SAMPLE_WITH_DIR followed by two annotated tags: v0.1 pointing at :100
+# and v0.2 pointing at :102.
+_SAMPLE_WITH_TAGS = _SAMPLE_WITH_DIR + \
+b"""tag v0.1
+from :100
+tagger d <b@c> 1234798653 +0000
+data 12
+release v0.1
+tag v0.2
+from :102
+tagger d <b@c> 1234798653 +0000
+data 12
+release v0.2
+"""
+
+class TestIncludePathsWithTags(TestCaseWithFiltering):
+    """Tags must be kept (with 'from' rewritten) or dropped when filtering."""
+
+    def test_tag_retention(self):
+        # If a tag references a commit with a parent we kept,
+        # keep the tag but adjust 'from' accordingly.
+        # Otherwise, delete the tag command.
+        # Here v0.1 (from :100) is dropped and v0.2 is rewritten to :101.
+        params = {b'include_paths': [b'NEWS']}
+        self.assertFiltering(_SAMPLE_WITH_TAGS, params, \
+b"""blob
+mark :2
+data 17
+Life
+is
+good ...
+commit refs/heads/master
+mark :101
+committer a <b@c> 1234798653 +0000
+data 8
+test
+ing
+M 644 :2 NEWS
+tag v0.2
+from :101
+tagger d <b@c> 1234798653 +0000
+data 12
+release v0.2
+""")
+
+
+# _SAMPLE_WITH_DIR plus two resets: refs/heads/foo has no 'from' (branch
+# creation) while refs/heads/bar is based on commit :102.
+_SAMPLE_WITH_RESETS = _SAMPLE_WITH_DIR + \
+b"""reset refs/heads/foo
+reset refs/heads/bar
+from :102
+"""
+
+class TestIncludePathsWithResets(TestCaseWithFiltering):
+    """Reset commands must be passed through or re-targeted when filtering."""
+
+    def test_reset_retention(self):
+        # Resets init'ing a branch (without a from) are passed through.
+        # If a reset references a commit with a parent we kept,
+        # keep the reset but adjust 'from' accordingly.
+        params = {b'include_paths': [b'NEWS']}
+        self.assertFiltering(_SAMPLE_WITH_RESETS, params, \
+b"""blob
+mark :2
+data 17
+Life
+is
+good ...
+commit refs/heads/master
+mark :101
+committer a <b@c> 1234798653 +0000
+data 8
+test
+ing
+M 644 :2 NEWS
+reset refs/heads/foo
+reset refs/heads/bar
+from :101
+""")
+
+
+# A sample input stream containing empty commit
+# (commit :3 has no file commands at all)
+_SAMPLE_EMPTY_COMMIT = \
+b"""blob
+mark :1
+data 4
+foo
+commit refs/heads/master
+mark :2
+committer Joe <joe@example.com> 1234567890 +1000
+data 14
+Initial import
+M 644 :1 COPYING
+commit refs/heads/master
+mark :3
+committer Joe <joe@example.com> 1234567890 +1000
+data 12
+empty commit
+"""
+
+# A sample input stream containing unresolved from and merge references
+# (marks :999 and :1001 are never defined anywhere in the stream)
+_SAMPLE_FROM_MERGE_COMMIT = \
+b"""blob
+mark :1
+data 4
+foo
+commit refs/heads/master
+mark :3
+committer Joe <joe@example.com> 1234567890 +1000
+data 6
+import
+M 644 :1 COPYING
+blob
+mark :2
+data 4
+bar
+commit refs/heads/master
+mark :4
+committer Joe <joe@example.com> 1234567890 +1000
+data 19
+unknown from commit
+from :999
+M 644 :2 data/DATA
+blob
+mark :99
+data 4
+bar
+commit refs/heads/master
+mark :5
+committer Joe <joe@example.com> 1234567890 +1000
+data 12
+merge commit
+from :3
+merge :4
+merge :1001
+M 644 :99 data/DATA2
+"""
+
+class TestSquashEmptyCommitsFlag(TestCaseWithFiltering):
+    """Behaviour of the 'squash_empty_commits' filter parameter.
+
+    By default (parameter absent) commits left with no file commands are
+    squashed away; with squash_empty_commits=False they are passed through
+    unchanged, including unresolved from/merge references.
+    """
+
+    def test_squash_empty_commit(self):
+        # Default behaviour: the empty commit :3 is dropped
+        params = {b'include_paths': None, b'exclude_paths': None}
+        self.assertFiltering(_SAMPLE_EMPTY_COMMIT, params, \
+b"""blob
+mark :1
+data 4
+foo
+commit refs/heads/master
+mark :2
+committer Joe <joe@example.com> 1234567890 +1000
+data 14
+Initial import
+M 644 :1 COPYING
+""")
+
+    def test_keep_empty_commit(self):
+        # With squashing disabled the stream passes through unchanged
+        params = {b'include_paths': None, b'exclude_paths': None, b'squash_empty_commits': False}
+        self.assertFiltering(_SAMPLE_EMPTY_COMMIT, params, _SAMPLE_EMPTY_COMMIT)
+
+    def test_squash_unresolved_references(self):
+        # Commits are non-empty, so nothing is squashed even though
+        # :999 / :1001 are unresolved
+        params = {b'include_paths': None, b'exclude_paths': None}
+        self.assertFiltering(_SAMPLE_FROM_MERGE_COMMIT, params, \
+b"""blob
+mark :1
+data 4
+foo
+commit refs/heads/master
+mark :3
+committer Joe <joe@example.com> 1234567890 +1000
+data 6
+import
+M 644 :1 COPYING
+blob
+mark :2
+data 4
+bar
+commit refs/heads/master
+mark :4
+committer Joe <joe@example.com> 1234567890 +1000
+data 19
+unknown from commit
+from :999
+M 644 :2 data/DATA
+blob
+mark :99
+data 4
+bar
+commit refs/heads/master
+mark :5
+committer Joe <joe@example.com> 1234567890 +1000
+data 12
+merge commit
+from :3
+merge :4
+merge :1001
+M 644 :99 data/DATA2
+""")
+
+    def test_keep_unresolved_from_and_merge(self):
+        params = {b'include_paths': None, b'exclude_paths': None, b'squash_empty_commits': False}
+        self.assertFiltering(_SAMPLE_FROM_MERGE_COMMIT, params, _SAMPLE_FROM_MERGE_COMMIT)
+
+    def test_with_excludes(self):
+        # Excluding data/DATA empties commit :4, but it is kept because
+        # squashing is disabled
+        params = {b'include_paths': None,
+                  b'exclude_paths': [b'data/DATA'],
+                  b'squash_empty_commits': False}
+        self.assertFiltering(_SAMPLE_FROM_MERGE_COMMIT, params, \
+b"""blob
+mark :1
+data 4
+foo
+commit refs/heads/master
+mark :3
+committer Joe <joe@example.com> 1234567890 +1000
+data 6
+import
+M 644 :1 COPYING
+commit refs/heads/master
+mark :4
+committer Joe <joe@example.com> 1234567890 +1000
+data 19
+unknown from commit
+from :999
+blob
+mark :99
+data 4
+bar
+commit refs/heads/master
+mark :5
+committer Joe <joe@example.com> 1234567890 +1000
+data 12
+merge commit
+from :3
+merge :4
+merge :1001
+M 644 :99 data/DATA2
+""")
+
+    def test_with_file_includes(self):
+        params = {b'include_paths': [b'COPYING', b'data/DATA2'],
+                  b'exclude_paths': None,
+                  b'squash_empty_commits': False}
+        self.assertFiltering(_SAMPLE_FROM_MERGE_COMMIT, params, \
+b"""blob
+mark :1
+data 4
+foo
+commit refs/heads/master
+mark :3
+committer Joe <joe@example.com> 1234567890 +1000
+data 6
+import
+M 644 :1 COPYING
+commit refs/heads/master
+mark :4
+committer Joe <joe@example.com> 1234567890 +1000
+data 19
+unknown from commit
+from :999
+blob
+mark :99
+data 4
+bar
+commit refs/heads/master
+mark :5
+committer Joe <joe@example.com> 1234567890 +1000
+data 12
+merge commit
+from :3
+merge :4
+merge :1001
+M 644 :99 data/DATA2
+"""
+)
+
+    def test_with_directory_includes(self):
+        # Paths are rebased under data/ (data/DATA -> DATA)
+        params = {b'include_paths': [b'data/'],
+                  b'exclude_paths': None,
+                  b'squash_empty_commits': False}
+        self.assertFiltering(_SAMPLE_FROM_MERGE_COMMIT, params, \
+b"""commit refs/heads/master
+mark :3
+committer Joe <joe@example.com> 1234567890 +1000
+data 6
+import
+blob
+mark :2
+data 4
+bar
+commit refs/heads/master
+mark :4
+committer Joe <joe@example.com> 1234567890 +1000
+data 19
+unknown from commit
+from :999
+M 644 :2 DATA
+blob
+mark :99
+data 4
+bar
+commit refs/heads/master
+mark :5
+committer Joe <joe@example.com> 1234567890 +1000
+data 12
+merge commit
+from :3
+merge :4
+merge :1001
+M 644 :99 DATA2
+""")
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/hgext3rd/fastimport/vendor/python_fastimport/tests/test_helpers.py	Tue Jan 19 22:56:34 2021 +0000
@@ -0,0 +1,55 @@
+# Copyright (C) 2009 Canonical Ltd
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program.  If not, see <http://www.gnu.org/licenses/>.
+
+"""Test the helper functions."""
+
+import unittest
+
+from fastimport import (
+    helpers,
+    )
+
+
+class TestCommonDirectory(unittest.TestCase):
+    """Tests for helpers.common_directory().
+
+    The helper returns the longest directory prefix (with trailing slash)
+    shared by all given paths; None for no paths, b'' when only the root
+    is common.
+    """
+
+    def test_no_paths(self):
+        # None and an empty list both yield None, not b''
+        c = helpers.common_directory(None)
+        self.assertEqual(c, None)
+        c = helpers.common_directory([])
+        self.assertEqual(c, None)
+
+    def test_one_path(self):
+        # A bare filename has no directory component
+        c = helpers.common_directory([b'foo'])
+        self.assertEqual(c, b'')
+        c = helpers.common_directory([b'foo/'])
+        self.assertEqual(c, b'foo/')
+        c = helpers.common_directory([b'foo/bar'])
+        self.assertEqual(c, b'foo/')
+
+    def test_two_paths(self):
+        c = helpers.common_directory([b'foo', b'bar'])
+        self.assertEqual(c, b'')
+        c = helpers.common_directory([b'foo/', b'bar'])
+        self.assertEqual(c, b'')
+        c = helpers.common_directory([b'foo/', b'foo/bar'])
+        self.assertEqual(c, b'foo/')
+        c = helpers.common_directory([b'foo/bar/x', b'foo/bar/y'])
+        self.assertEqual(c, b'foo/bar/')
+        # Common prefix must stop at a path separator, not mid-component
+        c = helpers.common_directory([b'foo/bar/aa_x', b'foo/bar/aa_y'])
+        self.assertEqual(c, b'foo/bar/')
+
+    def test_lots_of_paths(self):
+        c = helpers.common_directory([b'foo/bar/x', b'foo/bar/y', b'foo/bar/z'])
+        self.assertEqual(c, b'foo/bar/')
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/hgext3rd/fastimport/vendor/python_fastimport/tests/test_info_processor.py	Tue Jan 19 22:56:34 2021 +0000
@@ -0,0 +1,77 @@
+# Copyright (C) 2018 Jelmer Vernooij
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program.  If not, see <http://www.gnu.org/licenses/>.
+
+"""Test InfoProcessor"""
+from io import BytesIO
+
+try:
+    from StringIO import StringIO
+except ImportError:
+    from io import StringIO
+
+from unittest import TestCase
+
+from fastimport import (
+    parser,
+    )
+
+from fastimport.processors import (
+    info_processor,
+    )
+
+# Minimal stream: one commit on master with a mark but no file commands.
+simple_fast_import_stream = b"""commit refs/heads/master
+mark :1
+committer Jelmer Vernooij <jelmer@samba.org> 1299718135 +0100
+data 7
+initial
+
+"""
+
+class TestFastImportInfo(TestCase):
+    """InfoProcessor should render the expected statistics report."""
+
+    def test_simple(self):
+        stream = BytesIO(simple_fast_import_stream)
+        outf = StringIO()
+        proc = info_processor.InfoProcessor(outf=outf)
+        p = parser.ImportParser(stream)
+        # Note: the bound iter_commands method is passed uncalled; the
+        # processor invokes it to obtain the command iterator.
+        proc.process(p.iter_commands)
+
+        self.maxDiff = None
+        self.assertEqual(outf.getvalue(), """Command counts:
+\t0\tblob
+\t0\tcheckpoint
+\t1\tcommit
+\t0\tfeature
+\t0\tprogress
+\t0\treset
+\t0\ttag
+File command counts:
+\t0\tfilemodify
+\t0\tfiledelete
+\t0\tfilecopy
+\t0\tfilerename
+\t0\tfiledeleteall
+Parent counts:
+\t1\tparents-0
+\t0\ttotal revisions merged
+Commit analysis:
+\tno\tblobs referenced by SHA
+\tno\texecutables
+\tno\tseparate authors found
+\tno\tsymlinks
+Head analysis:
+\t:1\trefs/heads/master
+Merges:
+""")
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/hgext3rd/fastimport/vendor/python_fastimport/tests/test_parser.py	Tue Jan 19 22:56:34 2021 +0000
@@ -0,0 +1,353 @@
+# Copyright (C) 2008 Canonical Ltd
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program.  If not, see <http://www.gnu.org/licenses/>.
+
+"""Test the Import parsing"""
+import io
+import time
+import unittest
+
+from fastimport import (
+    commands,
+    errors,
+    parser,
+    )
+
+
+class TestLineBasedParser(unittest.TestCase):
+
+    def test_push_line(self):
+        s = io.BytesIO(b"foo\nbar\nbaz\n")
+        p = parser.LineBasedParser(s)
+        self.assertEqual(b'foo', p.next_line())
+        self.assertEqual(b'bar', p.next_line())
+        p.push_line(b'bar')
+        self.assertEqual(b'bar', p.next_line())
+        self.assertEqual(b'baz', p.next_line())
+        self.assertEqual(None, p.next_line())
+
+    def test_read_bytes(self):
+        s = io.BytesIO(b"foo\nbar\nbaz\n")
+        p = parser.LineBasedParser(s)
+        self.assertEqual(b'fo', p.read_bytes(2))
+        self.assertEqual(b'o\nb', p.read_bytes(3))
+        self.assertEqual(b'ar', p.next_line())
+        # Test that the line buffer is ignored
+        p.push_line(b'bar')
+        self.assertEqual(b'baz', p.read_bytes(3))
+        # Test missing bytes
+        self.assertRaises(errors.MissingBytes, p.read_bytes, 10)
+
+    def test_read_until(self):
+        # TODO
+        return
+        s = io.BytesIO(b"foo\nbar\nbaz\nabc\ndef\nghi\n")
+        p = parser.LineBasedParser(s)
+        self.assertEqual(b'foo\nbar', p.read_until(b'baz'))
+        self.assertEqual(b'abc', p.next_line())
+        # Test that the line buffer is ignored
+        p.push_line(b'abc')
+        self.assertEqual(b'def', p.read_until(b'ghi'))
+        # Test missing terminator
+        self.assertRaises(errors.MissingTerminator, p.read_until(b'>>>'))
+
+
+# Sample text exercising most command variants the parser must handle.
+# The '#' lines *inside* the literal are fast-import stream comments that
+# the parser itself has to skip.  "data N" payloads are byte-exact: the
+# first blob's "data 4" payload is b"aaaa", with the next "blob" command
+# starting on the same physical line.
+_sample_import_text = b"""
+progress completed
+# Test blob formats
+blob
+mark :1
+data 4
+aaaablob
+data 5
+bbbbb
+# Commit formats
+commit refs/heads/master
+mark :2
+committer bugs bunny <bugs@bunny.org> now
+data 14
+initial import
+M 644 inline README
+data 18
+Welcome from bugs
+commit refs/heads/master
+committer <bugs@bunny.org> now
+data 13
+second commit
+from :2
+M 644 inline README
+data 23
+Welcome from bugs, etc.
+# Miscellaneous
+checkpoint
+progress completed
+# Test a commit without sub-commands (bug #351717)
+commit refs/heads/master
+mark :3
+author <bugs@bunny.org> now
+committer <bugs@bunny.org> now
+data 20
+first commit, empty
+# Test a commit with a heredoc-style (delimited_data) messsage (bug #400960)
+commit refs/heads/master
+mark :4
+author <bugs@bunny.org> now
+committer <bugs@bunny.org> now
+data <<EOF
+Commit with heredoc-style message
+EOF
+# Test a "submodule"/tree-reference
+commit refs/heads/master
+mark :5
+author <bugs@bunny.org> now
+committer <bugs@bunny.org> now
+data 15
+submodule test
+M 160000 rev-id tree-id
+# Test features
+feature whatever
+feature foo=bar
+# Test commit with properties
+commit refs/heads/master
+mark :6
+committer <bugs@bunny.org> now
+data 18
+test of properties
+property p1
+property p2 5 hohum
+property p3 16 alpha
+beta
+gamma
+property p4 8 whatever
+# Test a commit with multiple authors
+commit refs/heads/master
+mark :7
+author Fluffy <fluffy@bunny.org> now
+author Daffy <daffy@duck.org> now
+author Donald <donald@duck.org> now
+committer <bugs@bunny.org> now
+data 17
+multi-author test
+"""
+
+# Keep a reference to the real time.time so tearDown can restore it after
+# the monkeypatch in setUp.
+_timefunc = time.time
+class TestImportParser(unittest.TestCase):
+    """Parse _sample_import_text end to end, checking each command in order."""
+    def setUp(self):
+        # The sample stream dates commits with 'now'; pin time.time so the
+        # parsed committer/author timestamps are deterministic.
+        self.fake_time = 42.0123
+        time.time = lambda: self.fake_time
+    def tearDown(self):
+        # Undo the module-global monkeypatch
+        time.time = _timefunc
+        del self.fake_time
+
+    def test_iter_commands(self):
+        s = io.BytesIO(_sample_import_text)
+        p = parser.ImportParser(s)
+        result = []
+        for cmd in p.iter_commands():
+            result.append(cmd)
+            if cmd.name == b'commit':
+                for fc in cmd.iter_files():
+                    result.append(fc)
+
+        # 14 top-level commands plus 3 file sub-commands
+        self.assertEqual(len(result), 17)
+        cmd1 = result.pop(0)
+        self.assertEqual(b'progress', cmd1.name)
+        self.assertEqual(b'completed', cmd1.message)
+        cmd2 = result.pop(0)
+        self.assertEqual(b'blob', cmd2.name)
+        self.assertEqual(b'1', cmd2.mark)
+        self.assertEqual(b':1', cmd2.id)
+        self.assertEqual(b'aaaa', cmd2.data)
+        self.assertEqual(4, cmd2.lineno)
+        cmd3 = result.pop(0)
+        self.assertEqual(b'blob', cmd3.name)
+        # An unmarked blob gets an '@<lineno>' id ('@7' matches lineno 7)
+        self.assertEqual(b'@7', cmd3.id)
+        self.assertEqual(None, cmd3.mark)
+        self.assertEqual(b'bbbbb', cmd3.data)
+        self.assertEqual(7, cmd3.lineno)
+        cmd4 = result.pop(0)
+        self.assertEqual(b'commit', cmd4.name)
+        self.assertEqual(b'2', cmd4.mark)
+        self.assertEqual(b':2', cmd4.id)
+        self.assertEqual(b'initial import', cmd4.message)
+
+        # 'now' in the stream resolves to the patched time.time value
+        self.assertEqual((b'bugs bunny', b'bugs@bunny.org', self.fake_time, 0), cmd4.committer)
+        # namedtuple attributes
+        self.assertEqual(b'bugs bunny', cmd4.committer.name)
+        self.assertEqual(b'bugs@bunny.org', cmd4.committer.email)
+        self.assertEqual(self.fake_time, cmd4.committer.timestamp)
+        self.assertEqual(0, cmd4.committer.timezone)
+
+        self.assertEqual(None, cmd4.author)
+        self.assertEqual(11, cmd4.lineno)
+        self.assertEqual(b'refs/heads/master', cmd4.ref)
+        self.assertEqual(None, cmd4.from_)
+        self.assertEqual([], cmd4.merges)
+        file_cmd1 = result.pop(0)
+        self.assertEqual(b'filemodify', file_cmd1.name)
+        self.assertEqual(b'README', file_cmd1.path)
+        self.assertEqual(0o100644, file_cmd1.mode)
+        self.assertEqual(b'Welcome from bugs\n', file_cmd1.data)
+        cmd5 = result.pop(0)
+        self.assertEqual(b'commit', cmd5.name)
+        self.assertEqual(None, cmd5.mark)
+        self.assertEqual(b'@19', cmd5.id)
+        self.assertEqual(b'second commit', cmd5.message)
+        self.assertEqual((b'', b'bugs@bunny.org', self.fake_time, 0), cmd5.committer)
+        self.assertEqual(None, cmd5.author)
+        self.assertEqual(19, cmd5.lineno)
+        self.assertEqual(b'refs/heads/master', cmd5.ref)
+        self.assertEqual(b':2', cmd5.from_)
+        self.assertEqual([], cmd5.merges)
+        file_cmd2 = result.pop(0)
+        self.assertEqual(b'filemodify', file_cmd2.name)
+        self.assertEqual(b'README', file_cmd2.path)
+        self.assertEqual(0o100644, file_cmd2.mode)
+        self.assertEqual(b'Welcome from bugs, etc.', file_cmd2.data)
+        cmd6 = result.pop(0)
+        self.assertEqual(cmd6.name, b'checkpoint')
+        cmd7 = result.pop(0)
+        self.assertEqual(b'progress', cmd7.name)
+        self.assertEqual(b'completed', cmd7.message)
+        cmd = result.pop(0)
+        self.assertEqual(b'commit', cmd.name)
+        self.assertEqual(b'3', cmd.mark)
+        self.assertEqual(None, cmd.from_)
+        cmd = result.pop(0)
+        self.assertEqual(b'commit', cmd.name)
+        self.assertEqual(b'4', cmd.mark)
+        self.assertEqual(b'Commit with heredoc-style message\n', cmd.message)
+        cmd = result.pop(0)
+        self.assertEqual(b'commit', cmd.name)
+        self.assertEqual(b'5', cmd.mark)
+        self.assertEqual(b'submodule test\n', cmd.message)
+        file_cmd1 = result.pop(0)
+        self.assertEqual(b'filemodify', file_cmd1.name)
+        self.assertEqual(b'tree-id', file_cmd1.path)
+        self.assertEqual(0o160000, file_cmd1.mode)
+        self.assertEqual(b"rev-id", file_cmd1.dataref)
+        cmd = result.pop(0)
+        self.assertEqual(b'feature', cmd.name)
+        self.assertEqual(b'whatever', cmd.feature_name)
+        self.assertEqual(None, cmd.value)
+        cmd = result.pop(0)
+        self.assertEqual(b'feature', cmd.name)
+        self.assertEqual(b'foo', cmd.feature_name)
+        self.assertEqual(b'bar', cmd.value)
+        cmd = result.pop(0)
+        self.assertEqual(b'commit', cmd.name)
+        self.assertEqual(b'6', cmd.mark)
+        self.assertEqual(b'test of properties', cmd.message)
+        self.assertEqual({
+            b'p1': None,
+            b'p2': b'hohum',
+            b'p3': b'alpha\nbeta\ngamma',
+            b'p4': b'whatever',
+        }, cmd.properties)
+        cmd = result.pop(0)
+        self.assertEqual(b'commit', cmd.name)
+        self.assertEqual(b'7', cmd.mark)
+        self.assertEqual(b'multi-author test', cmd.message)
+        self.assertEqual(b'', cmd.committer[0])
+        self.assertEqual(b'bugs@bunny.org', cmd.committer[1])
+        # First author is kept on .author, the rest land in .more_authors
+        self.assertEqual(b'Fluffy', cmd.author[0])
+        self.assertEqual(b'fluffy@bunny.org', cmd.author[1])
+        self.assertEqual(b'Daffy', cmd.more_authors[0][0])
+        self.assertEqual(b'daffy@duck.org', cmd.more_authors[0][1])
+        self.assertEqual(b'Donald', cmd.more_authors[1][0])
+        self.assertEqual(b'donald@duck.org', cmd.more_authors[1][1])
+
+    def test_done_feature_missing_done(self):
+        # 'feature done' promises a trailing 'done'; its absence is an error
+        s = io.BytesIO(b"""feature done
+""")
+        p = parser.ImportParser(s)
+        cmds = p.iter_commands()
+        self.assertEqual(b"feature", next(cmds).name)
+        self.assertRaises(errors.PrematureEndOfStream, lambda: next(cmds))
+
+    def test_done_with_feature(self):
+        # Everything after 'done' is ignored
+        s = io.BytesIO(b"""feature done
+done
+more data
+""")
+        p = parser.ImportParser(s)
+        cmds = p.iter_commands()
+        self.assertEqual(b"feature", next(cmds).name)
+        self.assertRaises(StopIteration, lambda: next(cmds))
+
+    def test_done_without_feature(self):
+        # A bare 'done' also terminates the stream
+        s = io.BytesIO(b"""done
+more data
+""")
+        p = parser.ImportParser(s)
+        cmds = p.iter_commands()
+        self.assertEqual([], list(cmds))
+
+
+class TestStringParsing(unittest.TestCase):
+
+    def test_unquote(self):
+        s = br'hello \"sweet\" wo\\r\tld'
+        self.assertEqual(br'hello "sweet" wo\r' + b'\tld',
+            parser._unquote_c_string(s))
+
+
+class TestPathPairParsing(unittest.TestCase):
+
+    def test_path_pair_simple(self):
+        p = parser.ImportParser(b'')
+        self.assertEqual([b'foo', b'bar'], p._path_pair(b'foo bar'))
+
+    def test_path_pair_spaces_in_first(self):
+        p = parser.ImportParser("")
+        self.assertEqual([b'foo bar', b'baz'],
+            p._path_pair(b'"foo bar" baz'))
+
+
+class TestTagParsing(unittest.TestCase):
+    """Parsing of 'tag' commands, with and without strict tagger checking."""
+
+    def test_tagger_with_email(self):
+        p = parser.ImportParser(io.BytesIO(
+            b"tag refs/tags/v1.0\n"
+            b"from :xxx\n"
+            b"tagger Joe Wong <joe@example.com> 1234567890 -0600\n"
+            b"data 11\n"
+            b"create v1.0"))
+        cmds = list(p.iter_commands())
+        self.assertEqual(1, len(cmds))
+        self.assertTrue(isinstance(cmds[0], commands.TagCommand))
+        # Timezone -0600 is parsed as an offset of -21600 seconds
+        self.assertEqual(cmds[0].tagger,
+            (b'Joe Wong', b'joe@example.com', 1234567890.0, -21600))
+
+    def test_tagger_no_email_strict(self):
+        # In strict mode (the default) a tagger without an email is rejected
+        p = parser.ImportParser(io.BytesIO(
+            b"tag refs/tags/v1.0\n"
+            b"from :xxx\n"
+            b"tagger Joe Wong\n"
+            b"data 11\n"
+            b"create v1.0"))
+        self.assertRaises(errors.BadFormat, list, p.iter_commands())
+
+    def test_tagger_no_email_not_strict(self):
+        # With strict=False the email field is simply None
+        p = parser.ImportParser(io.BytesIO(
+            b"tag refs/tags/v1.0\n"
+            b"from :xxx\n"
+            b"tagger Joe Wong\n"
+            b"data 11\n"
+            b"create v1.0"), strict=False)
+        cmds = list(p.iter_commands())
+        self.assertEqual(1, len(cmds))
+        self.assertTrue(isinstance(cmds[0], commands.TagCommand))
+        self.assertEqual(cmds[0].tagger[:2], (b'Joe Wong', None))