From 0d07110c0a06eb95653a48e173404796ba3cad72 Mon Sep 17 00:00:00 2001
From: tristanlatr <19967168+tristanlatr@users.noreply.github.com>
Date: Sun, 15 Sep 2024 18:33:44 +0200
Subject: [PATCH] 2024 maintenance (#814)

* Fix issues #504, #795, #803.

* Support Python 3.13 and adopt Python 3.12 everywhere in the CI, except for
  unit tests, which are run on all supported Python versions and platforms.
---
 .github/workflows/pydoctor_primer.yaml     |  2 +-
 .github/workflows/system.yaml              |  2 +-
 .github/workflows/unit.yaml                | 14 ++--
 README.rst                                 |  4 ++
 pydoctor/epydoc/docutils.py                | 34 ++++++----
 pydoctor/epydoc/markup/__init__.py         |  4 +-
 pydoctor/epydoc/markup/_pyval_repr.py      | 20 +++---
 pydoctor/epydoc/markup/_types.py           | 17 +++--
 pydoctor/epydoc/markup/epytext.py          |  8 ++-
 pydoctor/epydoc/markup/restructuredtext.py | 73 +++++++++++----------
 pydoctor/epydoc2stan.py                    |  2 +-
 pydoctor/node2stan.py                      | 76 ++++++++++++----------
 pydoctor/sphinx.py                         |  4 +-
 pydoctor/test/epydoc/test_pyval_repr.py    |  2 +-
 pydoctor/test/test_commandline.py          |  4 ++
 pydoctor/test/test_model.py                |  4 +-
 pydoctor/test/test_type_fields.py          |  2 +-
 pydoctor/test/test_visitor.py              |  2 +-
 setup.cfg                                  | 11 ++--
 tox.ini                                    | 24 -------
 20 files changed, 156 insertions(+), 153 deletions(-)

diff --git a/.github/workflows/pydoctor_primer.yaml b/.github/workflows/pydoctor_primer.yaml
index 137f6e26a..8e9393b97 100644
--- a/.github/workflows/pydoctor_primer.yaml
+++ b/.github/workflows/pydoctor_primer.yaml
@@ -28,7 +28,7 @@ jobs:
         fetch-depth: 0
     - uses: actions/setup-python@v5
       with:
-        python-version: "3.11"
+        python-version: "3.12"
     - name: Install pydoctor_primer
      run: |
        python -m pip install -U pip
diff --git a/.github/workflows/system.yaml b/.github/workflows/system.yaml
index 9fed60686..897d84dab 100644
--- a/.github/workflows/system.yaml
+++ b/.github/workflows/system.yaml
@@ -20,7 +20,7 @@ jobs:
     - name: Set up CPython
       uses: actions/setup-python@v5
       with:
-        python-version: '3.11'
+        python-version: '3.12'
 
     - name: Install tox
       run: |
diff --git a/.github/workflows/unit.yaml b/.github/workflows/unit.yaml
index 161a6ba3b..7e42c0f9c 100644
--- a/.github/workflows/unit.yaml
+++ b/.github/workflows/unit.yaml
@@ -19,14 +19,9 @@ jobs:
 
     strategy:
       matrix:
-        # Re-enable 3.13-dev when https://github.com/zopefoundation/zope.interface/issues/292 is fixed
-        python-version: [pypy-3.8, 3.8, 3.9, '3.10', 3.11, '3.12']
-        os: [ubuntu-22.04]
-        include:
-          - os: windows-latest
-            python-version: 3.11
-          - os: macos-latest
-            python-version: 3.11
+        python-version: ['pypy-3.8', 'pypy-3.9', 'pypy-3.10',
+          '3.8', '3.9', '3.10', '3.11', '3.12', '3.13.0-rc.2']
+        os: [ubuntu-latest, windows-latest, macos-latest]
 
     steps:
     - uses: actions/checkout@v4
@@ -51,8 +46,7 @@ jobs:
       run: |
         tox -e test
 
-    - name: Run unit tests with latest Twisted version (only for python 3.8 and later)
-      if: matrix.python-version != '3.7' && matrix.python-version != 'pypy-3.7'
+    - name: Run unit tests with latest Twisted version
       run: |
         tox -e test-latest-twisted
diff --git a/README.rst b/README.rst
index 5a920d0d3..79e0002c9 100644
--- a/README.rst
+++ b/README.rst
@@ -76,7 +76,11 @@ What's New?
 in development
 ^^^^^^^^^^^^^^
 
+* Drop Python 3.7 and support Python 3.13.
 * Trigger a warning when several docstrings are detected for the same object.
+* Improve typing of docutils-related code.
+* Run unit tests on all supported combinations of Python versions and platforms, including PyPy on Windows. Previously, tests were run on all supported Python versions on Linux, but not on macOS and Windows.
+* Replace the deprecated dependency appdirs with platformdirs.
 * Fix WinError caused by the failure of the symlink creation process. Pydoctor should now run on windows without the need to be administrator.
diff --git a/pydoctor/epydoc/docutils.py b/pydoctor/epydoc/docutils.py
index 44ce8c61e..66442b2de 100644
--- a/pydoctor/epydoc/docutils.py
+++ b/pydoctor/epydoc/docutils.py
@@ -3,7 +3,7 @@
 """
 from __future__ import annotations
 
-from typing import Iterable, Iterator, Optional
+from typing import Iterable, Iterator, Optional, TypeVar, cast
 
 import optparse
 
@@ -25,7 +25,7 @@ def new_document(source_path: str, settings: Optional[optparse.Values] = None) -
     # the default settings. Otherwise we let new_document figure it out.
     if settings is None and docutils_version_info >= (0,19):
         if _DEFAULT_DOCUTILS_SETTINGS is None:
-            _DEFAULT_DOCUTILS_SETTINGS = frontend.get_default_settings() # type:ignore[attr-defined]
+            _DEFAULT_DOCUTILS_SETTINGS = frontend.get_default_settings()
 
         settings = _DEFAULT_DOCUTILS_SETTINGS
 
@@ -41,10 +41,11 @@ def _set_nodes_parent(nodes: Iterable[nodes.Node], parent: nodes.Element) -> Ite
         node.parent = parent
         yield node
 
-def set_node_attributes(node: nodes.Node,
+TNode = TypeVar('TNode', bound=nodes.Node)
+def set_node_attributes(node: TNode,
                         document: Optional[nodes.document] = None,
                         lineno: Optional[int] = None,
-                        children: Optional[Iterable[nodes.Node]] = None) -> nodes.Node:
+                        children: Optional[Iterable[nodes.Node]] = None) -> TNode:
     """
     Set the attributes of a Node and return the modified node. This is required to manually construct a docutils document that is consistent.
 
@@ -68,29 +69,34 @@ def set_node_attributes(node: nodes.Node,
 
     return node
 
-def build_table_of_content(node: nodes.Node, depth: int, level: int = 0) -> Optional[nodes.Node]:
+def build_table_of_content(node: nodes.Element, depth: int, level: int = 0) -> nodes.Element | None:
     """
     Simplified from docutils Contents transform.
 
     All section nodes MUST have set attribute 'ids' to a list of strings.
     """
 
-    def _copy_and_filter(node: nodes.Node) -> nodes.Node:
+    def _copy_and_filter(node: nodes.Element) -> nodes.Element:
         """Return a copy of a title, with references, images, etc. removed."""
-        visitor = parts.ContentsFilter(node.document)
+        if (doc:=node.document) is None:
+            raise AssertionError(f'missing document attribute on {node}')
+        visitor = parts.ContentsFilter(doc)
         node.walkabout(visitor)
-        return visitor.get_entry_text()
+        # the stubs are currently incomplete, 2024.
+        return visitor.get_entry_text() # type:ignore
 
     level += 1
     sections = [sect for sect in node if isinstance(sect, nodes.section)]
     entries = []
+    if (doc:=node.document) is None:
+        raise AssertionError(f'missing document attribute on {node}')
+
     for section in sections:
-        title = section[0]
+        title = cast(nodes.Element, section[0]) # the first element of a section is the header.
         entrytext = _copy_and_filter(title)
         reference = nodes.reference('', '', refid=section['ids'][0], *entrytext)
-        ref_id = node.document.set_id(reference,
-                                      suggested_prefix='toc-entry')
+        ref_id = doc.set_id(reference, suggested_prefix='toc-entry')
         entry = nodes.paragraph('', '', reference)
         item = nodes.list_item('', entry)
         if title.next_node(nodes.reference) is None:
@@ -105,7 +111,7 @@ def _copy_and_filter(node: nodes.Node) -> nodes.Node:
     else:
         return None
 
-def get_lineno(node: nodes.Node) -> int:
+def get_lineno(node: nodes.Element) -> int:
     """
     Get the 0-based line number for a docutils `nodes.title_reference`.
@@ -114,7 +120,7 @@ def get_lineno(node: nodes.Node) -> int: """ # Fixes https://github.com/twisted/pydoctor/issues/237 - def get_first_parent_lineno(_node: Optional[nodes.Node]) -> int: + def get_first_parent_lineno(_node: nodes.Element | None) -> int: if _node is None: return 0 @@ -143,7 +149,7 @@ def get_first_parent_lineno(_node: Optional[nodes.Node]) -> int: else: line = get_first_parent_lineno(node.parent) - return line # type:ignore[no-any-return] + return line class wbr(nodes.inline): """ diff --git a/pydoctor/epydoc/markup/__init__.py b/pydoctor/epydoc/markup/__init__.py index 3933934fb..3d2d721fc 100644 --- a/pydoctor/epydoc/markup/__init__.py +++ b/pydoctor/epydoc/markup/__init__.py @@ -430,14 +430,14 @@ def visit_document(self, node: nodes.Node) -> None: _SENTENCE_RE_SPLIT = re.compile(r'( *[\.\?!][\'"\)\]]* *)') - def visit_paragraph(self, node: nodes.Node) -> None: + def visit_paragraph(self, node: nodes.paragraph) -> None: if self.summary is not None: # found a paragraph after the first one self.other_docs = True raise nodes.StopTraversal() summary_doc = new_document('summary') - summary_pieces = [] + summary_pieces: list[nodes.Node] = [] # Extract the first sentences from the first paragraph until maximum number # of characters is reach or until the end of the paragraph. diff --git a/pydoctor/epydoc/markup/_pyval_repr.py b/pydoctor/epydoc/markup/_pyval_repr.py index c466eb9ee..7691b79a1 100644 --- a/pydoctor/epydoc/markup/_pyval_repr.py +++ b/pydoctor/epydoc/markup/_pyval_repr.py @@ -390,24 +390,25 @@ def _trim_result(self, result: List[nodes.Node], num_chars: int) -> None: while num_chars > 0: if not result: return - if isinstance(result[-1], nodes.Element): - if len(result[-1].children) >= 1: - data = result[-1][-1].astext() + if isinstance(r1:=result[-1], nodes.Element): + if len(r1.children) >= 1: + data = r1[-1].astext() trim = min(num_chars, len(data)) - result[-1][-1] = nodes.Text(data[:-trim]) - if not result[-1][-1].astext(): - if len(result[-1].children) == 1: + r1[-1] = nodes.Text(data[:-trim]) + if not r1[-1].astext(): + if len(r1.children) == 1: result.pop() else: - result[-1].pop() + r1.pop() else: trim = 0 result.pop() num_chars -= trim else: # Must be Text if it's not an Element - trim = min(num_chars, len(result[-1])) - result[-1] = nodes.Text(result[-1].astext()[:-trim]) + assert isinstance(r1, nodes.Text) + trim = min(num_chars, len(r1)) + result[-1] = nodes.Text(r1.astext()[:-trim]) if not result[-1].astext(): result.pop() num_chars -= trim @@ -1012,6 +1013,7 @@ def _output(self, s: AnyStr, css_class: Optional[str], # If the segment fits on the current line, then just call # markup to tag it, and store the result. # Don't break links into separate segments, neither quotes. 
+            element: nodes.Node
             if (self.linelen is None or
                 state.charpos + segment_len <= self.linelen
                 or link is True
diff --git a/pydoctor/epydoc/markup/_types.py b/pydoctor/epydoc/markup/_types.py
index 005f80a0c..8e94243d6 100644
--- a/pydoctor/epydoc/markup/_types.py
+++ b/pydoctor/epydoc/markup/_types.py
@@ -5,7 +5,7 @@
 """
 from __future__ import annotations
 
-from typing import Callable, Dict, List, Tuple, Union
+from typing import Any, Callable, Dict, List, Tuple, Union, cast
 
 from pydoctor.epydoc.markup import DocstringLinker, ParseError, ParsedDocstring, get_parser_by_name
 from pydoctor.node2stan import node2stan
@@ -21,8 +21,9 @@ class ParsedTypeDocstring(TypeDocstring, ParsedDocstring):
     """
 
     FIELDS = ('type', 'rtype', 'ytype', 'returntype', 'yieldtype')
-
-    _tokens: List[Tuple[Union[str, nodes.Node], TokenType]]
+
+    # yes this overrides the superclass type!
+    _tokens: list[tuple[str | nodes.Node, TokenType]] # type: ignore
 
     def __init__(self, annotation: Union[nodes.document, str],
                  warns_on_unknown_tokens: bool = False, lineno: int = 0) -> None:
@@ -31,7 +32,8 @@ def __init__(self, annotation: Union[nodes.document, str],
             TypeDocstring.__init__(self, '', warns_on_unknown_tokens)
 
             _tokens = self._tokenize_node_type_spec(annotation)
-            self._tokens = self._build_tokens(_tokens)
+            self._tokens = cast('list[tuple[str | nodes.Node, TokenType]]',
+                                self._build_tokens(_tokens))
             self._trigger_warnings()
         else:
             TypeDocstring.__init__(self, annotation, warns_on_unknown_tokens)
@@ -82,8 +84,8 @@ def _warn_not_supported(n:nodes.Node) -> None:
 
         return tokens
 
-    def _convert_obj_tokens_to_stan(self, tokens: List[Tuple[Union[str, nodes.Node], TokenType]],
-                                    docstring_linker: DocstringLinker) -> List[Tuple[Union[str, Tag, nodes.Node], TokenType]]:
+    def _convert_obj_tokens_to_stan(self, tokens: List[Tuple[Any, TokenType]],
+                                    docstring_linker: DocstringLinker) -> list[tuple[Any, TokenType]]:
         """
         Convert L{TokenType.OBJ} and PEP 484 like L{TokenType.DELIMITER} type to stan, merge them together. Leave the rest untouched.
 
@@ -96,12 +98,13 @@ def _convert_obj_tokens_to_stan(self, tokens: List[Tuple[Union[str, nodes.Node],
 
         @param tokens: List of tuples: C{(token, type)}
         """
-        combined_tokens: List[Tuple[Union[str, Tag], TokenType]] = []
+        combined_tokens: list[tuple[Any, TokenType]] = []
 
         open_parenthesis = 0
         open_square_braces = 0
 
         for _token, _type in tokens:
+            # The actual type of _token is str | Tag | Node.
if (_type is TokenType.DELIMITER and _token in ('[', '(', ')', ']')) \ or _type is TokenType.OBJ: diff --git a/pydoctor/epydoc/markup/epytext.py b/pydoctor/epydoc/markup/epytext.py index 0026614d2..5a35ab732 100644 --- a/pydoctor/epydoc/markup/epytext.py +++ b/pydoctor/epydoc/markup/epytext.py @@ -1425,7 +1425,9 @@ def _to_node(self, tree: Element) -> Iterable[nodes.Node]: yield set_node_attributes(nodes.inline('', ''), document=self._document, children=variables) elif tree.tag == 'target': value, = variables - yield set_node_attributes(nodes.Text(value), document=self._document) + if not isinstance(value, nodes.Text): + raise AssertionError("target contents must be a simple text.") + yield set_node_attributes(value, document=self._document) elif tree.tag == 'italic': yield set_node_attributes(nodes.emphasis('', ''), document=self._document, children=variables) elif tree.tag == 'math': @@ -1445,7 +1447,9 @@ def _to_node(self, tree: Element) -> Iterable[nodes.Node]: elif tree.tag == 'literalblock': yield set_node_attributes(nodes.literal_block('', ''), document=self._document, children=variables) elif tree.tag == 'doctestblock': - yield set_node_attributes(nodes.doctest_block(tree.children[0], tree.children[0]), document=self._document) + if not isinstance(contents:=tree.children[0], str): + raise AssertionError("doctest block contents is not a string") + yield set_node_attributes(nodes.doctest_block(contents, contents), document=self._document) elif tree.tag in ('fieldlist', 'tag', 'arg'): raise AssertionError("There should not be any field lists left") elif tree.tag == 'section': diff --git a/pydoctor/epydoc/markup/restructuredtext.py b/pydoctor/epydoc/markup/restructuredtext.py index c1c79ccfd..36e2cf796 100644 --- a/pydoctor/epydoc/markup/restructuredtext.py +++ b/pydoctor/epydoc/markup/restructuredtext.py @@ -41,13 +41,13 @@ from __future__ import annotations __docformat__ = 'epytext en' -from typing import Iterable, List, Optional, Sequence, Set, cast +from typing import Any, Iterable, List, Optional, Sequence, Set, cast import re from docutils import nodes from docutils.core import publish_string from docutils.writers import Writer -from docutils.parsers.rst.directives.admonitions import BaseAdmonition # type: ignore[import-untyped] +from docutils.parsers.rst.directives.admonitions import BaseAdmonition from docutils.readers.standalone import Reader as StandaloneReader from docutils.utils import Reporter from docutils.parsers.rst import Directive, directives @@ -123,7 +123,7 @@ class OptimizedReporter(Reporter): isn't very fast about processing its own debug messages. """ - def debug(self, *args: object, **kwargs: object) -> None: + def debug(self, *args: Any, **kwargs: Any) -> None: # type:ignore[override] pass class ParsedRstDocstring(ParsedDocstring): @@ -227,10 +227,10 @@ def __init__(self, document: nodes.document, errors: List[ParseError]): self.fields: List[Field] = [] self._newfields: Set[str] = set() - def visit_document(self, node: nodes.Node) -> None: + def visit_document(self, node: nodes.document) -> None: self.fields = [] - def visit_field(self, node: nodes.Node) -> None: + def visit_field(self, node: nodes.field) -> None: # Remove the field from the tree. 
         node.parent.remove(node)
@@ -247,6 +247,7 @@ def visit_field(self, node: nodes.Node) -> None:
 
         # Handle special fields:
         fbody = node[1]
+        assert isinstance(fbody, nodes.Element)
         if arg is None:
             for (list_tag, entry_tag) in CONSOLIDATED_FIELDS.items():
                 if tagname.lower() == list_tag:
@@ -263,7 +264,7 @@ def visit_field(self, node: nodes.Node) -> None:
             if tagname.lower() not in self._newfields:
                 newfield = Field('newfield', tagname.lower(),
                                  ParsedPlaintextDocstring(tagname),
-                                 node.line - 1)
+                                 (node.line or 1) - 1)
                 self.fields.append(newfield)
                 self._newfields.add(tagname.lower())
 
@@ -273,37 +274,40 @@ def _add_field(self, tagname: str,
                    arg: Optional[str],
                    fbody: Iterable[nodes.Node],
-                   lineno: int
+                   lineno: int | None
                    ) -> None:
         field_doc = self.document.copy()
         for child in fbody:
             field_doc.append(child)
         field_parsed_doc = ParsedRstDocstring(field_doc, ())
-        self.fields.append(Field(tagname, arg, field_parsed_doc, lineno - 1))
+        self.fields.append(Field(tagname, arg, field_parsed_doc, (lineno or 1) - 1))
 
-    def visit_field_list(self, node: nodes.Node) -> None:
+    def visit_field_list(self, node: nodes.field_list) -> None:
         # Remove the field list from the tree. The visitor will still walk
         # over the node's children.
         node.parent.remove(node)
 
-    def handle_consolidated_field(self, body: Sequence[nodes.Node], tagname: str) -> None:
+    def handle_consolidated_field(self, body: nodes.Element, tagname: str) -> None:
         """
         Attempt to handle a consolidated section.
         """
         if len(body) != 1:
             raise ValueError('does not contain a single list.')
-        elif body[0].tagname == 'bullet_list':
-            self.handle_consolidated_bullet_list(body[0], tagname)
-        elif (body[0].tagname == 'definition_list' and
+        if not isinstance(b0:=body[0], nodes.Element):
+            # unfortunate assertion required for typing purposes
+            raise ValueError('does not contain a list.')
+        if isinstance(b0, nodes.bullet_list):
+            self.handle_consolidated_bullet_list(b0, tagname)
+        elif (isinstance(b0, nodes.definition_list) and
              tagname in CONSOLIDATED_DEFLIST_FIELDS):
-            self.handle_consolidated_definition_list(body[0], tagname)
+            self.handle_consolidated_definition_list(b0, tagname)
         elif tagname in CONSOLIDATED_DEFLIST_FIELDS:
             raise ValueError('does not contain a bulleted list or '
                              'definition list.')
         else:
             raise ValueError('does not contain a bulleted list.')
 
-    def handle_consolidated_bullet_list(self, items: Iterable[nodes.Node], tagname: str) -> None:
+    def handle_consolidated_bullet_list(self, items: nodes.bullet_list, tagname: str) -> None:
         # Check the contents of the list. In particular, each list
         # item should have the form:
         #   - `arg`: description...
@@ -314,29 +318,30 @@ def handle_consolidated_bullet_list(self, items: Iterable[nodes.Node], tagname:
                      "description.")
         for item in items:
             n += 1
-            if item.tagname != 'list_item' or len(item) == 0:
+            if not isinstance(item, nodes.list_item) or len(item) == 0:
                 raise ValueError('bad bulleted list (bad child %d).' % n)
-            if item[0].tagname != 'paragraph':
-                if item[0].tagname == 'definition_list':
+            if not isinstance(i0:=item[0], nodes.paragraph):
+                if isinstance(i0, nodes.definition_list):
                     raise ValueError(('list item %d contains a definition '+
                                       'list (it\'s probably indented '+
                                       'wrong).') % n)
                 else:
                     raise ValueError(_BAD_ITEM % n)
-            if len(item[0]) == 0:
+            if len(i0) == 0:
                 raise ValueError(_BAD_ITEM % n)
-            if item[0][0].tagname != 'title_reference':
+            if not isinstance(i0[0], nodes.title_reference):
                 raise ValueError(_BAD_ITEM % n)
 
         # Everything looks good; convert to multiple fields.
         for item in items:
-            # Extract the arg
-            arg = item[0][0].astext()
+            assert isinstance(item, nodes.list_item) # for typing
+            # Extract the arg, item[0][0] is safe since we checked earlier for malformed lists.
+            arg = item[0][0].astext() # type: ignore
 
             # Extract the field body, and remove the arg
-            fbody = item[:]
+            fbody = cast('list[nodes.Element]', item[:])
             fbody[0] = fbody[0].copy()
-            fbody[0][:] = item[0][1:]
+            fbody[0][:] = cast(nodes.paragraph, item[0])[1:]
 
             # Remove the separating ":", if present
             if (len(fbody[0]) > 0 and
@@ -350,7 +355,7 @@ def handle_consolidated_bullet_list(self, items: Iterable[nodes.Node], tagname:
             # Wrap the field body, and add a new field
             self._add_field(tagname, arg, fbody, fbody[0].line)
 
-    def handle_consolidated_definition_list(self, items: Iterable[nodes.Node], tagname: str) -> None:
+    def handle_consolidated_definition_list(self, items: nodes.definition_list, tagname: str) -> None:
         # Check the list contents.
         n = 0
         _BAD_ITEM = ("item %d is not well formed. Each item's term must "
@@ -359,29 +364,31 @@ def handle_consolidated_definition_list(self, items: Iterable[nodes.Node], tagna
                      "a type description.")
         for item in items:
             n += 1
-            if (item.tagname != 'definition_list_item' or len(item) < 2 or
-                item[-1].tagname != 'definition'):
+            if (not isinstance(item, nodes.definition_list_item) or len(item) < 2 or
+                not isinstance(item[-1], nodes.definition) or
+                not isinstance(i0:=item[0], nodes.Element)):
                 raise ValueError('bad definition list (bad child %d).' % n)
             if len(item) > 3:
                 raise ValueError(_BAD_ITEM % n)
-            if not ((item[0][0].tagname == 'title_reference') or
+            if not ((isinstance(i0[0], nodes.title_reference)) or
                     (self.ALLOW_UNMARKED_ARG_IN_CONSOLIDATED_FIELD and
-                     isinstance(item[0][0], nodes.Text))):
+                     isinstance(i0[0], nodes.Text))):
                 raise ValueError(_BAD_ITEM % n)
-            for child in item[0][1:]:
+            for child in i0[1:]:
                 if child.astext() != '':
                     raise ValueError(_BAD_ITEM % n)
 
         # Extract it.
         for item in items:
+            assert isinstance(item, nodes.definition_list_item) # for typing
             # The basic field.
-            arg = item[0][0].astext()
+            arg = cast(nodes.Element, item[0])[0].astext()
             lineno = item[0].line
-            fbody = item[-1]
+            fbody = cast(nodes.definition, item[-1])
             self._add_field(tagname, arg, fbody, lineno)
             # If there's a classifier, treat it as a type.
if len(item) == 3: - type_descr = item[1] + type_descr = cast(nodes.Element, item[1]) self._add_field('type', arg, type_descr, lineno) def unknown_visit(self, node: nodes.Node) -> None: diff --git a/pydoctor/epydoc2stan.py b/pydoctor/epydoc2stan.py index eda352d1a..8b55497d8 100644 --- a/pydoctor/epydoc2stan.py +++ b/pydoctor/epydoc2stan.py @@ -1145,7 +1145,7 @@ def get_constructors_extra(cls:model.Class) -> ParsedDocstring | None: document = new_document('constructors') - elements = [] + elements: list[nodes.Node] = [] plural = 's' if len(constructors)>1 else '' elements.append(set_node_attributes( nodes.Text(f'Constructor{plural}: '), diff --git a/pydoctor/node2stan.py b/pydoctor/node2stan.py index a2e705623..8b2f00665 100644 --- a/pydoctor/node2stan.py +++ b/pydoctor/node2stan.py @@ -14,6 +14,7 @@ if TYPE_CHECKING: from twisted.web.template import Flattenable from pydoctor.epydoc.markup import DocstringLinker + from pydoctor.epydoc.docutils import obj_reference from pydoctor.epydoc.docutils import get_lineno from pydoctor.epydoc.doctest import colorize_codeblock, colorize_doctest @@ -23,7 +24,9 @@ def node2html(node: nodes.Node, docstring_linker: 'DocstringLinker') -> List[str """ Convert a L{docutils.nodes.Node} object to HTML strings. """ - visitor = HTMLTranslator(node.document, docstring_linker) + if (doc:=node.document) is None: + raise AssertionError(f'missing document attribute on {node}') + visitor = HTMLTranslator(doc, docstring_linker) node.walkabout(visitor) return visitor.body @@ -81,16 +84,17 @@ def __init__(self, if self.settings is None: if docutils_version_info >= (0,19): # Direct access to OptionParser is deprecated from Docutils 0.19 - # FIXME: https://github.com/twisted/pydoctor/issues/504 - # Stubs are not up to date because we use pinned version of types-docutils - settings = frontend.get_default_settings(html4css1.Writer()) # type:ignore[attr-defined] + settings = frontend.get_default_settings(html4css1.Writer()) else: - settings = frontend.OptionParser([html4css1.Writer()]).get_default_values() + settings = frontend.OptionParser([html4css1.Writer()]).get_default_values() # type: ignore # Save default settings as class attribute not to re-compute it all the times self.__class__.settings = settings + else: + # yes "optparse.Values" and "docutils.frontend.Values" are compatible. + settings = self.settings # type: ignore - document.settings = self.settings + document.settings = settings super().__init__(document) @@ -99,15 +103,15 @@ def __init__(self, self.section_level += 1 # Handle interpreted text (crossreferences) - def visit_title_reference(self, node: nodes.Node) -> None: + def visit_title_reference(self, node: nodes.title_reference) -> None: lineno = get_lineno(node) self._handle_reference(node, link_func=lambda target, label: self._linker.link_xref(target, label, lineno)) # Handle internal references - def visit_obj_reference(self, node: nodes.Node) -> None: + def visit_obj_reference(self, node: obj_reference) -> None: self._handle_reference(node, link_func=self._linker.link_to) - def _handle_reference(self, node: nodes.Node, link_func: Callable[[str, "Flattenable"], "Flattenable"]) -> None: + def _handle_reference(self, node: nodes.title_reference, link_func: Callable[[str, "Flattenable"], "Flattenable"]) -> None: label: "Flattenable" if 'refuri' in node.attributes: # Epytext parsed or manually constructed nodes. 
@@ -127,16 +131,16 @@ def _handle_reference(self, node: nodes.Node, link_func: Callable[[str, "Flatten self.body.append(flatten(link_func(target, label))) raise nodes.SkipNode() - def should_be_compact_paragraph(self, node: nodes.Node) -> bool: + def should_be_compact_paragraph(self, node: nodes.Element) -> bool: if self.document.children == [node]: return True else: return super().should_be_compact_paragraph(node) # type: ignore[no-any-return] - def visit_document(self, node: nodes.Node) -> None: + def visit_document(self, node: nodes.document) -> None: pass - def depart_document(self, node: nodes.Node) -> None: + def depart_document(self, node: nodes.document) -> None: pass def starttag(self, node: nodes.Node, tagname: str, suffix: str = '\n', **attributes: Any) -> str: @@ -156,7 +160,7 @@ def starttag(self, node: nodes.Node, tagname: str, suffix: str = '\n', **attribu # Get the list of all attribute dictionaries we need to munge. attr_dicts = [attributes] - if isinstance(node, nodes.Node): + if isinstance(node, nodes.Element): attr_dicts.append(node.attributes) if isinstance(node, dict): attr_dicts.append(node) @@ -197,7 +201,7 @@ def starttag(self, node: nodes.Node, tagname: str, suffix: str = '\n', **attribu return super().starttag(node, tagname, suffix, **attributes) # type: ignore[no-any-return] - def visit_doctest_block(self, node: nodes.Node) -> None: + def visit_doctest_block(self, node: nodes.doctest_block) -> None: pysrc = node[0].astext() if node.get('codeblock'): self.body.append(flatten(colorize_codeblock(pysrc))) @@ -215,64 +219,64 @@ def visit_doctest_block(self, node: nodes.Node) -> None: # this part of the HTMLTranslator is based on sphinx's HTMLTranslator: # https://github.com/sphinx-doc/sphinx/blob/3.x/sphinx/writers/html.py#L271 - def _visit_admonition(self, node: nodes.Node, name: str) -> None: + def _visit_admonition(self, node: nodes.Element, name: str) -> None: self.body.append(self.starttag( node, 'div', CLASS=('admonition ' + _valid_identifier(name)))) node.insert(0, nodes.title(name, name.title())) self.set_first_last(node) - def visit_note(self, node: nodes.Node) -> None: + def visit_note(self, node: nodes.Element) -> None: self._visit_admonition(node, 'note') - def depart_note(self, node: nodes.Node) -> None: + def depart_note(self, node: nodes.Element) -> None: self.depart_admonition(node) - def visit_warning(self, node: nodes.Node) -> None: + def visit_warning(self, node: nodes.Element) -> None: self._visit_admonition(node, 'warning') - def depart_warning(self, node: nodes.Node) -> None: + def depart_warning(self, node: nodes.Element) -> None: self.depart_admonition(node) - def visit_attention(self, node: nodes.Node) -> None: + def visit_attention(self, node: nodes.Element) -> None: self._visit_admonition(node, 'attention') - def depart_attention(self, node: nodes.Node) -> None: + def depart_attention(self, node: nodes.Element) -> None: self.depart_admonition(node) - def visit_caution(self, node: nodes.Node) -> None: + def visit_caution(self, node: nodes.Element) -> None: self._visit_admonition(node, 'caution') - def depart_caution(self, node: nodes.Node) -> None: + def depart_caution(self, node: nodes.Element) -> None: self.depart_admonition(node) - def visit_danger(self, node: nodes.Node) -> None: + def visit_danger(self, node: nodes.Element) -> None: self._visit_admonition(node, 'danger') - def depart_danger(self, node: nodes.Node) -> None: + def depart_danger(self, node: nodes.Element) -> None: self.depart_admonition(node) - def visit_error(self, node: 
nodes.Node) -> None: + def visit_error(self, node: nodes.Element) -> None: self._visit_admonition(node, 'error') - def depart_error(self, node: nodes.Node) -> None: + def depart_error(self, node: nodes.Element) -> None: self.depart_admonition(node) - def visit_hint(self, node: nodes.Node) -> None: + def visit_hint(self, node: nodes.Element) -> None: self._visit_admonition(node, 'hint') - def depart_hint(self, node: nodes.Node) -> None: + def depart_hint(self, node: nodes.Element) -> None: self.depart_admonition(node) - def visit_important(self, node: nodes.Node) -> None: + def visit_important(self, node: nodes.Element) -> None: self._visit_admonition(node, 'important') - def depart_important(self, node: nodes.Node) -> None: + def depart_important(self, node: nodes.Element) -> None: self.depart_admonition(node) - def visit_tip(self, node: nodes.Node) -> None: + def visit_tip(self, node: nodes.Element) -> None: self._visit_admonition(node, 'tip') - def depart_tip(self, node: nodes.Node) -> None: + def depart_tip(self, node: nodes.Element) -> None: self.depart_admonition(node) def visit_wbr(self, node: nodes.Node) -> None: @@ -281,13 +285,13 @@ def visit_wbr(self, node: nodes.Node) -> None: def depart_wbr(self, node: nodes.Node) -> None: pass - def visit_seealso(self, node: nodes.Node) -> None: + def visit_seealso(self, node: nodes.Element) -> None: self._visit_admonition(node, 'see also') - def depart_seealso(self, node: nodes.Node) -> None: + def depart_seealso(self, node: nodes.Element) -> None: self.depart_admonition(node) - def visit_versionmodified(self, node: nodes.Node) -> None: + def visit_versionmodified(self, node: nodes.Element) -> None: self.body.append(self.starttag(node, 'div', CLASS=node['type'])) def depart_versionmodified(self, node: nodes.Node) -> None: diff --git a/pydoctor/sphinx.py b/pydoctor/sphinx.py index dbec697a3..01ca8f970 100644 --- a/pydoctor/sphinx.py +++ b/pydoctor/sphinx.py @@ -13,7 +13,7 @@ Optional, Tuple ) -import appdirs +import platformdirs import attr import requests from cachecontrol import CacheControl @@ -271,7 +271,7 @@ def _generateLine(self, obj: Documentable) -> str: return f'{full_name} py:{domainname} -1 {url} {display}\n' -USER_INTERSPHINX_CACHE = appdirs.user_cache_dir("pydoctor") +USER_INTERSPHINX_CACHE = platformdirs.user_cache_dir("pydoctor") @attr.s(auto_attribs=True) diff --git a/pydoctor/test/epydoc/test_pyval_repr.py b/pydoctor/test/epydoc/test_pyval_repr.py index 1515c85c5..50e88adf6 100644 --- a/pydoctor/test/epydoc/test_pyval_repr.py +++ b/pydoctor/test/epydoc/test_pyval_repr.py @@ -15,7 +15,7 @@ def color(v: Any, linebreakok:bool=True, maxlines:int=5, linelen:int=40) -> str: colorizer = PyvalColorizer(linelen=linelen, linebreakok=linebreakok, maxlines=maxlines) parsed_doc = colorizer.colorize(v) - return parsed_doc.to_node().pformat() #type: ignore + return parsed_doc.to_node().pformat() def colorhtml(v: Any, linebreakok:bool=True, maxlines:int=5, linelen:int=40) -> str: colorizer = PyvalColorizer(linelen=linelen, linebreakok=linebreakok, maxlines=maxlines) diff --git a/pydoctor/test/test_commandline.py b/pydoctor/test/test_commandline.py index 5178af7ea..dc6623c1b 100644 --- a/pydoctor/test/test_commandline.py +++ b/pydoctor/test/test_commandline.py @@ -4,6 +4,8 @@ import re import sys +import pytest + from pydoctor.options import Options from pydoctor import driver @@ -67,6 +69,7 @@ def test_projectbasedir_absolute(tmp_path: Path) -> None: assert options.projectbasedirectory.is_absolute() 
+@pytest.mark.skipif("platform.python_implementation() == 'PyPy' and platform.system() == 'Windows'") def test_projectbasedir_symlink(tmp_path: Path) -> None: """ The --project-base-dir option, when given a path containing a symbolic link, @@ -206,6 +209,7 @@ def test_main_return_non_zero_on_warnings() -> None: assert 'report_module.py:9: Cannot find link target for "BadLink"' in stream.getvalue() +@pytest.mark.skipif("platform.python_implementation() == 'PyPy' and platform.system() == 'Windows'") def test_main_symlinked_paths(tmp_path: Path) -> None: """ The project base directory and package/module directories are normalized diff --git a/pydoctor/test/test_model.py b/pydoctor/test/test_model.py index 9dc9d4f33..115983595 100644 --- a/pydoctor/test/test_model.py +++ b/pydoctor/test/test_model.py @@ -340,7 +340,7 @@ def test_introspection_extension() -> None: testpackages = Path(__file__).parent / 'testpackages' -@pytest.mark.skipif("platform.python_implementation() == 'PyPy'") +@pytest.mark.skipif("platform.python_implementation() == 'PyPy' or platform.system() == 'Windows'") def test_c_module_text_signature(capsys:CapSys) -> None: c_module_invalid_text_signature = testpackages / 'c_module_invalid_text_signature' @@ -379,7 +379,7 @@ def test_c_module_text_signature(capsys:CapSys) -> None: # cleanup subprocess.getoutput(f'rm -f {package_path}/*.so') -@pytest.mark.skipif("platform.python_implementation() == 'PyPy'") +@pytest.mark.skipif("platform.python_implementation() == 'PyPy' or platform.system() == 'Windows'") def test_c_module_python_module_name_clash(capsys:CapSys) -> None: c_module_python_module_name_clash = testpackages / 'c_module_python_module_name_clash' package_path = c_module_python_module_name_clash / 'mymod' diff --git a/pydoctor/test/test_type_fields.py b/pydoctor/test/test_type_fields.py index c7742df82..103d402c6 100644 --- a/pydoctor/test/test_type_fields.py +++ b/pydoctor/test/test_type_fields.py @@ -48,7 +48,7 @@ def test_parsed_type_convert_obj_tokens_to_stan() -> None: ([("list", TokenType.OBJ), ("(", TokenType.DELIMITER), ("int", TokenType.OBJ), (")", TokenType.DELIMITER), (", ", TokenType.DELIMITER), ("optional", TokenType.CONTROL)], [(Tag('code', children=['list', '(', 'int', ')']), TokenType.OBJ), (", ", TokenType.DELIMITER), ("optional", TokenType.CONTROL)]), - ] + ] ann = ParsedTypeDocstring("") diff --git a/pydoctor/test/test_visitor.py b/pydoctor/test/test_visitor.py index e204cd598..be4bfefdc 100644 --- a/pydoctor/test/test_visitor.py +++ b/pydoctor/test/test_visitor.py @@ -19,7 +19,7 @@ def unknown_visit(self, ob: nodes.Node) -> None: @classmethod def get_children(cls, ob:nodes.Node) -> Iterable[nodes.Node]: if isinstance(ob, nodes.Element): - return ob.children # type:ignore[no-any-return] + return ob.children return [] class MainVisitor(DocutilsNodeVisitor): diff --git a/setup.cfg b/setup.cfg index c71d31d41..09edf2a4f 100644 --- a/setup.cfg +++ b/setup.cfg @@ -19,12 +19,12 @@ classifiers = License :: OSI Approved :: MIT License Operating System :: OS Independent Programming Language :: Python :: 3 - Programming Language :: Python :: 3.7 Programming Language :: Python :: 3.8 Programming Language :: Python :: 3.9 Programming Language :: Python :: 3.10 Programming Language :: Python :: 3.11 Programming Language :: Python :: 3.12 + Programming Language :: Python :: 3.13 Programming Language :: Python :: Implementation :: CPython Programming Language :: Python :: Implementation :: PyPy Topic :: Documentation @@ -32,11 +32,11 @@ classifiers = [options] 
packages = find: -python_requires = >=3.7 +python_requires = >=3.8 install_requires = ; New requirements are OK but since pydotor is published as a debian package, ; we should mak sure requirements already exists in repository https://tracker.debian.org/. - appdirs + platformdirs CacheControl[filecache]>=0.12.14 Twisted urllib3>=2.0 @@ -70,6 +70,7 @@ test = bs4 Sphinx pytest-subtests + setuptools mypy = mypy>=0.902 @@ -81,9 +82,7 @@ mypy = sphinx>=3.4.0 twisted types-requests - ; FIXME: https://github.com/twisted/pydoctor/issues/504 - ; This is pinned for now as newer versions are breaking our static checks. - types-docutils==0.17.5 + types-docutils types-toml [options.entry_points] diff --git a/tox.ini b/tox.ini index d4352576c..76a58483f 100644 --- a/tox.ini +++ b/tox.ini @@ -186,30 +186,6 @@ commands = {tty:--pretty:} \ {posargs:pydoctor docs/epytext_demo} -[testenv:mypy-docutils-stubs] -description = run mypy with docutils-stubs (does not pass for now) -; See: https://github.com/python/typeshed/issues/1269 - -deps = - mypy>=0.902 - mypy-zope - typing-extensions - ; Libraries which include type annotations: - hypothesis - pytest>=6.0.0 - sphinx>=3.4.0 - git+https://github.com/twisted/twisted.git - types-requests - docutils-stubs - types-toml - -commands = - mypy \ - --cache-dir="{toxworkdir}/mypy_cache" \ - --exclude='pydoctor/test/testpackages' \ - {tty:--pretty:} \ - {posargs:pydoctor docs/epytext_demo} - [testenv:apidocs] description = Build only the API documentation