Skip to content

Core

agenda

Agendas indicate the work to be done to symbols.

SourcedUse dataclass

SourcedUse(name: str, imports: list[Importation], target: AST)

Represents a usage of a name which is imported, occurring in a definition target (which is an AST node in src, the file the definition is being moved from).

MovingImport dataclass

MovingImport(bound: Importation)

An import statement that is to be moved.

DepartingImport dataclass

DepartingImport(bound: Importation, lineno: int, end_lineno: int)

Bases: MovingImport

An import statement being removed from src, by the deletion of a line range.

ArrivingImport dataclass

ArrivingImport(bound: Importation)

Bases: MovingImport

An import statement being added to dst, by the 'unparsing' of its AST node.

Departure dataclass

Departure(rng: tuple[int, int], *, name: str)

Bases: NamedPatch

A definition or importation leaving.

Arrival dataclass

Arrival(rng: tuple[int, int], *, name: str)

Bases: NamedPatch

A definition or importation arriving.

Documented dataclass

Documented(rng: tuple[int, int], *, name: str, depth: int)

Bases: NamedPatch

A documented definition or importation.

OrderOfBusiness dataclass

OrderOfBusiness(cop: list[SourcedAgendum] = list(), lop: list[Agendum] = list(), doc: list[Agendum] = list())

What to cop vs. what to lop (move/copy vs. delete), or, as a mutually exclusive option, what to doc (document i.e. enumerate).

Agenda

Agenda(ref: Checker, dest_ref: Checker | None)
Source code in src/mvdef/core/agenda.py
def __init__(self, ref: Checker, dest_ref: Checker | None) -> None:
    """Store the src/dst checkers and initialise an empty agenda."""
    self.ref, self.dest_ref = ref, dest_ref
    # Nothing booked yet: no target names, and an empty order of business
    self.targets: list[str] = []
    self.targeted = OrderOfBusiness()

no_clash

no_clash(mv: list[str]) -> None

No repetition, and * must be used on its own.

Source code in src/mvdef/core/agenda.py
def no_clash(self, mv: list[str]) -> None:
    """No repetition, and * must be used on its own."""
    clash = set(mv) & set(self.targets)
    if clash:
        suffix = "s" if len(clash) > 1 else ""
        raise AgendaFailure(f"{clash=} - target{suffix} double booked")
    combined = [*mv, *self.targets]
    if "*" in combined and len(combined) > 1:
        raise AgendaFailure("Agenda clash: wildcard '*' must be used solo")

intake

intake(mv: list[str]) -> None

Prepare to cop/lop, or doc (ensure no double bookings!) Note: prevents mixed usage of doc() with either cop() or lop().

Source code in src/mvdef/core/agenda.py
def intake(self, mv: list[str]) -> None:
    """
    Register `mv` names as targets, first checking for double bookings.

    The clash check is what prevents mixing `doc()` with `cop()`/`lop()`.
    """
    self.no_clash(mv)  # raises AgendaFailure on repetition or misused wildcard
    self.targets += mv

doc

doc(agenda: list[Agendum]) -> None

Note: cannot be used before or after cop or lop.

Source code in src/mvdef/core/agenda.py
def doc(self, agenda: list[Agendum]) -> None:
    """Note: cannot be used before or after cop or lop."""
    self.intake([a.name for a in agenda])
    self.targeted.doc.extend(agenda)

manifest

manifest(dry_run: bool, as_list: bool) -> str

Manifest of definitions from applying the targeted agenda to the target file.

Source code in src/mvdef/core/agenda.py
def manifest(self, dry_run: bool, as_list: bool) -> str:
    """
    Manifest of definitions from applying the `targeted` agenda to the target file.

    Only the wildcard agenda (a single `'*'` doc entry) is implemented; any other
    agenda raises NotImplementedError.  Returns the newline-joined names when
    `as_list`, or the formatted manifest when `dry_run`; actual file editing is
    still a TODO.
    """
    def_depth = 1  # only list top-level definitions
    all_target_defs = self.ref.target_defs
    all_target_def_names = [d.name for d in all_target_defs if d.depth == def_depth]
    wildcard_present = "*" in [d.name for d in self.targeted.doc]  # TODO tidy this
    if wildcard_present:
        assert len(self.targeted.doc) == 1, "Wildcard must be used alone"
        manifest_names = all_target_def_names
        # Swap the '*' agendum out for the concrete name list it expands to
        wc_agendum = self.targeted.doc.pop()
        self.outtake(["*"])
        self.document(matchers=manifest_names, src=wc_agendum.file)
    else:
        raise NotImplementedError("Only wildcard implemented for now")
    # One Documented record per unique doc agendum, resolved to its AST node
    docket = [
        Documented(name=node.name, rng=self.def_rng(node.name), depth=node.depth)
        for agendum in self.unique_docs
        for node in [self.get_def_node(target_name=agendum.name)]
    ]
    docket.sort(key=lambda d: d.rng)
    # manifest_target_defs = [d for d in all_target_defs if d.name in manifest_names]
    # NOTE(review): this check is vacuous — docket was sorted by the same key above
    sorted_docket_deps = docket == sorted(docket, key=lambda d: d.rng)
    assert sorted_docket_deps, f"Unsorted deps {docket}"
    if as_list:
        manif = "\n".join(manifest_names)
    elif dry_run:
        manif = format_all(manifest_names)
    else:
        raise NotImplementedError("File editing goes here")
    return manif

unidiff

unidiff(target_file: Path, is_src: bool) -> str

Unified diff from applying the targeted agenda to the target file. If the file does not exist yet, pass in an empty string for old to avoid reading it.

Source code in src/mvdef/core/agenda.py
def unidiff(self, target_file: Path, is_src: bool) -> str:
    """
    Unified diff from applying the `targeted` agenda to the target file. If the
    file does not exist yet, pass in an empty string for `old` to avoid reading it.
    """
    # Source side diffs against `ref`; destination side against `dest_ref`
    before = self.ref.code if is_src else self.dest_ref.code
    after = self.simulate(input_text=before)
    return get_unidiff_text(
        a=before.splitlines(keepends=True),
        b=after.splitlines(keepends=True),
        filename=target_file.name,
    )

def_rng

def_rng(target_name: str) -> tuple[int, int]

Get the line range of a definition with the target name.

Source code in src/mvdef/core/agenda.py
def def_rng(self, target_name: str) -> tuple[int, int]:
    """Return the (start, end) line range of the named definition, decorators included."""
    node = self.get_def_node(target_name=target_name)
    if node.decorator_list:
        # A decorated def's range starts at its first decorator's line
        start = node.decorator_list[0].lineno
    else:
        start = node.lineno
    return (start, node.end_lineno)

def_depth

def_depth(target_name: str) -> int

Get the tree depth of a definition with the target name.

Source code in src/mvdef/core/agenda.py
def def_depth(self, target_name: str) -> int:
    """Return the tree depth of the definition with the target name."""
    return self.get_def_node(target_name=target_name).depth

unique_name_list

unique_name_list(duplicates: list) -> list[str]

Preserve unique names for each node type, in order of first appearance.

Source code in src/mvdef/core/agenda.py
def unique_name_list(self, duplicates: list) -> list:
    """
    Deduplicate `duplicates` by `.name`, preserving order of first appearance.

    Returns the first *node* seen for each distinct name — note that the
    previous `-> list[str]` annotation was wrong: the values are the nodes
    themselves, not name strings.

    :param duplicates: nodes carrying a `.name` attribute, possibly repeated.
    :return: one node per distinct name, in first-appearance order.
    """
    first_by_name: dict = {}
    for node in duplicates:
        # setdefault keeps the *first* node per name in O(n) total (the old
        # per-name `next()` scan was O(n^2); a plain dict comprehension would
        # wrongly keep the last occurrence)
        first_by_name.setdefault(node.name, node)
    return list(first_by_name.values())

sew_in_imports

sew_in_imports(imports: list[ArrivingImport], text: str, recheck: Checker = None) -> str

Leave sep of 2 lines if definitions go first, 1 line for anything else.

Source code in src/mvdef/core/agenda.py
def sew_in_imports(
    self,
    imports: list[ArrivingImport],
    text: str,
    recheck: Checker = None,
) -> str:
    """
    Leave sep of 2 lines if definitions go first, 1 line for anything else.
    """
    spacing = self.calculate_import_spacing(recheck=recheck)
    first = spacing.first_import_lineno
    # spacing.gap, spacing.first_import_lineno, spacing.future_offset
    start = first - 1 + spacing.future_offset if first else 0
    pre, suf = spacing.split_text(text, at=start)
    unparsed_imports = [imp.unparse() for imp in imports]
    pre_filtered = list(filter(None, pre))
    sewn = "\n".join(pre_filtered + unparsed_imports + [""] * spacing.gap) + suf
    return sewn

calculate_import_spacing

calculate_import_spacing(recheck: Checker) -> ImportSpacing

A quick estimate of how big a gap to leave after the supplied import(s), returning the gap (0, 1, or 2) and the first import line number [0 if none]. Also, if the gap is 1 (indicating import-first order), check if the first is the special __future__.annotations import (which must be left the first import). To do this thoroughly would presumably require interfacing with isort.

Source code in src/mvdef/core/agenda.py
def calculate_import_spacing(self, recheck: Checker) -> ImportSpacing:
    """
    A quick estimate of how big a gap to leave after the supplied import(s),
    returning the gap (0, 1, or 2) and the first import line number [0 if none].
    Also, if the gap is 1 (indicating import-first order), check if the first is the
    special `__future__.annotations` import (which must be left the first import).
    To do this thoroughly would presumably require interfacing with `isort`.

    :param recheck: the re-parsed Checker whose defs/imports are inspected.
    :return: an ImportSpacing with `gap`, `first_import_lineno`, `future_offset`.
    """
    # TODO: use recheck's list of imports rather than the `original_ref` to
    # determine if defs/imports are in the file, to be spaced out against
    ref = recheck  # self.original_ref
    # Earliest def/import line; `default=0` encodes "category absent"
    min_def_ln = min((d.lineno for d in ref.alldefs), default=0)
    min_imp_ln = min((i.source.lineno for i in ref.imports), default=0)
    # If the file is missing either definitions or imports, the min. will be 0.
    # Using or means that in this case, the other value would be used instead.
    # If there are *neither* definitions nor imports, the max. will be 0 too.
    min_lineno = min(min_def_ln, min_imp_ln) or max(min_def_ln, min_imp_ln)
    future_import_offset = 0
    if min_lineno == 0:
        # The file has no imports and no definitions (this is a null result)
        gap = 0
    elif min_imp_ln == min_lineno:
        # The file has imports before any definitions (this is a conclusive result)
        gap = 1
        # Check if first node line (which is an import) is __future__.annotations
        first_import = next(i for i in ref.imports if i.source.lineno == min_imp_ln)
        if first_import.fullName == "__future__.annotations":
            future_import_offset = 1  # Put import(s) after the future import
            # gap = 0  # Don't leave a gap (as it'd be after the future import)
    else:
        # The file has definitions before any imports (effectively starts with them)
        gap = 2
    spacing = ImportSpacing(
        gap=gap,
        first_import_lineno=min_imp_ln,
        future_offset=future_import_offset,
    )
    return spacing

pre_simulate

pre_simulate(input_text: str) -> str

First pass, with no change to import statements.

Source code in src/mvdef/core/agenda.py
def pre_simulate(self, input_text: str) -> str:
    """First pass: apply the agenda with no change to import statements."""
    return self.apply(input_text, imports_in=[], imports_out=[])

resimulate

resimulate(input_text: str, *, imports_in: list[ArrivingImport], imports_out: list[DepartingImport], recheck: Checker | None = None) -> str

Second pass if necessary to remove import statements that would not be used after moving the mv definition(s) out of the file.

Source code in src/mvdef/core/agenda.py
def resimulate(
    self,
    input_text: str,
    *,
    imports_in: list[ArrivingImport],
    imports_out: list[DepartingImport],
    recheck: Checker | None = None,
) -> str:
    """
    Second pass if necessary to remove import statements that would not be used
    after moving the `mv` definition(s) out of the file.
    """
    filtered = self.apply(
        input_text,
        imports_in=imports_in,
        imports_out=imports_out,
        recheck=recheck,
    )
    return filtered

recheck

recheck(input_text: str) -> Checker

First pass, with no change to import statements.

Source code in src/mvdef/core/agenda.py
def recheck(self, input_text: str) -> Checker:
    """
    Re-parse `input_text` with the settings of the original reference Checker.

    Used after a simulation pass to inspect the would-be file state.
    """
    # (A leftover debug guard that raised ValueError("WTF") on one hard-coded
    # input string was removed here)
    return reparse(check=self.original_ref, input_text=input_text)

simulate

simulate(input_text: str) -> str

This method has no side effects on the state of self.

Source code in src/mvdef/core/agenda.py
def simulate(self, input_text: str) -> str:
    """
    This method has no side effects on the state of `self`.

    Runs a first pass (`pre_simulate`) with imports untouched; then, if a
    recheck shows newly unused imports (src side) or import uses needing new
    imports (dst side), runs a second pass (`resimulate`).
    """
    pre_sim = self.pre_simulate(input_text=input_text)
    if self.original_ref is None:
        # (BUG?) No imports will be copped if dst is None (why?)
        return pre_sim
    # Note: recheck src for newly unused imports, and dst to gauge import spacing
    recheck = self.recheck(pre_sim)
    if self.is_src:
        # src side: drop any imports the departing definitions leave unused
        unused_imports = self.compare_imports(recheck)
        if unused_imports:
            return self.resimulate(
                input_text,
                imports_in=[],
                imports_out=unused_imports,
            )
        else:
            return pre_sim
    else:
        # dst side: bring along any imports the arriving definitions depend on
        import_uses = self.map_import_usage()
        if import_uses:
            dependent_imports = self.patch_dependents(uses=import_uses)
            return self.resimulate(
                input_text,
                imports_in=dependent_imports,
                imports_out=[],
                recheck=recheck,
            )
        else:
            return pre_sim

patch_dependents

patch_dependents(uses: list[SourcedUse]) -> list[ArrivingImport]

Patch any uses which depend on getting a new import. Turn the list of AST-sourced name binding uses into a list of patches to apply (prepend). Ensure not to patch any that are already present in the dst file.

Source code in src/mvdef/core/agenda.py
def patch_dependents(self, uses: list[SourcedUse]) -> list[ArrivingImport]:
    """
    Turn AST-sourced name-binding uses into prepend patches for the dst file.

    Any use whose name is already imported in dst is skipped, so existing
    imports are never duplicated.
    """
    already_in_dst = {imp.name for imp in self.original_ref.imports}
    arrivals: list[ArrivingImport] = []
    for use in uses:
        if use.name in already_in_dst:
            continue  # dst already imports this name
        for bound in use.imports:
            arrivals.append(ArrivingImport(bound=bound))
    return arrivals

map_import_usage

map_import_usage() -> list[SourcedUse]

AST ancestor subtrees of the node where each source import was marked as being 'used' (NB only one value: overwritten during walk, so it appears that we don't have access to all uses, only its last use).

However! Since we access the scopes during the walk, if we know they have at least one use (src_imp_name_trees) we can look up all uses of the same name.

This gets stored as all_src_imp_name_trees, and it tells us "what's above each usage node in the tree walked by Checker". We can match self.ref.target_defs to this 'ancestry' tree, to get all definition-scoped import uses (and which definitions they come from).

Having obtained those uses, we need to compare them to the imports themselves. This takes us from all_src_imp_name_trees to sourced_uses.

There are 2 possible ways we can cross-reference the uses to the imports
  • lopped::Patch.rng <-> used_on_line_ranges::Patch.rng
  • imports::Importation.source <-> ref.import_uses::(scope, node)

(1) Use line ranges [rejected: match AST nodes and unparse instead]

>>> used_on_line_ranges = {
        name: [
            Patch((name_node.lineno, name_node.end_lineno))
            for scope, name_node in use_list
        ]
        for name, use_list in self.ref.import_uses.items()
    }

(2) Match use name ancestor to a moved definition (AST node). If an import use occurs within a moved definition, colocate its import alongside, into dst. - NB: we store in used the node which uses the name. - NB: we store in imports the pyflakes.checker.Importation "binding".

This function is hardcoded to use self.ref, as we would never consider using self.dst_ref (which by definition does not contain the target_defs to move) or the recheck of src (which may have caused removal of used imports).

Source code in src/mvdef/core/agenda.py
def map_import_usage(self) -> list[SourcedUse]:
    """
    AST ancestor subtrees of the node where each source import was marked as being
    'used' (NB only one value: overwritten during walk, so it appears that we don't
    have access to all uses, only its last use).

    However! Since we access the `scopes` during the walk, if we know they have at
    least one use (`src_imp_name_trees`) we can look up all uses of the same name.

    This gets stored as `all_src_imp_name_trees`, and it tells us "what's above each
    usage node in the tree walked by `Checker`". We can match `self.ref.target_defs`
    to this 'ancestry' tree, to get all definition-scoped import uses (and which
    definitions they come from).

    Having obtained those uses, we need to compare them to the imports themselves.
    This takes us from `all_src_imp_name_trees` to `sourced_uses`.

    There are 2 possible ways we can cross-reference the uses to the imports:
      - lopped::Patch.rng <-> used_on_line_ranges::Patch.rng
      - imports::Importation.source <-> ref.import_uses::(scope, node)

    (1) Use line ranges [rejected: match AST nodes and unparse instead]

        >>> used_on_line_ranges = {
                name: [
                    Patch((name_node.lineno, name_node.end_lineno))
                    for scope, name_node in use_list
                ]
                for name, use_list in self.ref.import_uses.items()
            }

    (2) Match use name ancestor to a moved definition (AST node). If an import use
    occurs within a moved definition, colocate its import alongside, into dst.
      - NB: we store in `used` the node which uses the name.
      - NB: we store in `imports` the `pyflakes.checker.Importation` "binding".

    This function is hardcoded to use `self.ref`, as we would never consider using
    `self.dst_ref` (which by definition does not contain the `target_defs` to move)
    or the `recheck` of src (which may have caused removal of used imports).
    """
    # TODO: also duplicate future annotations without asking?
    if not self.ref.imports:
        return []
    src_imp_name_trees = [
        self.ref.get_ancestors(node)
        for src_imp in self.ref.imports
        if isinstance(src_imp.used, tuple)  # skip if `used` is a bool
        for name, node in [src_imp.used]  # (FunctionScope, ast.Name)
    ]
    if not src_imp_name_trees:
        return []
    # access without checking type: always (FunctionScope, ast.Name)
    # here because we control how the import_uses list is populated
    all_src_imp_name_trees = {
        name: [
            self.ref.get_ancestors(name_node)
            for (scope, name_node) in use_list
        ]
        for name, use_list in self.ref.import_uses.items()
    }
    src_import_names = [imp.name for imp in self.ref.imports]
    cop_names = [c.name for c in self.unique_cops]
    target_defs_in_src_to_cop = list(map(self.get_def_node, cop_names))
    sourced_uses = [
        SourcedUse(
            name=imp_name,
            # Fixed: only the importation(s) binding `imp_name` travel with
            # this use (previously every src import was attached, unfiltered,
            # as flagged by the old "(BUG) not filtered" comment)
            imports=[imp for imp in self.ref.imports if imp.name == imp_name],
            target=mv_target_in_src,
        )
        for imp_name, tree_group in all_src_imp_name_trees.items()
        for imp_ancestry in tree_group
        if imp_name in src_import_names
        if (
            mv_target_in_src := next(
                (
                    target_def
                    for target_def in target_defs_in_src_to_cop
                    if target_def in imp_ancestry
                ),
                None,
            )
        )
        is not None
    ]
    # We could simply AST unparse the node, avoiding any need to
    # process multiple imports on the same line, though this would
    # lose any comments on the line(s). This will be incompatible with
    # isort ignore comments, but that's fine (for an initial solution).
    return sourced_uses

check

Override the pyflakes Checker to extract info for our purposes.

Checker

Checker(*args, **kwargs)

Bases: FailableMixIn, Checker

A subclass of the pyflakes Checker (overriding specific parts).

Source code in src/mvdef/core/check.py
def __init__(self, *args, **kwargs):
    """Pop mvdef-specific options off kwargs, set up collection state, then defer to pyflakes."""
    self.code = kwargs.pop("code")
    self.verbose = kwargs.pop("verbose", False)
    self.escalate = kwargs.pop("escalate", False)
    self.target_cls = kwargs.pop("cls_defs", False)
    self.target_func = kwargs.pop("func_defs", False)
    # Filled in during the AST walk by the subclass override handlers
    self.funcdefs, self.classdefs, self.alldefs = [], [], []
    self.imports = []
    self.import_uses = {}
    super().__init__(*args, **kwargs)

target_defs property

target_defs: list[AST]

Expand to classdefs or either in future

handleNode

handleNode(node: AST | None, parent: AST) -> None

Subclass override

Source code in src/mvdef/core/check.py
def handleNode(self, node: AST | None, parent: AST) -> None:
    """Subclass override: tag every visited node with its ancestor count as `depth`."""
    super().handleNode(node=node, parent=parent)
    if node is None:
        return
    node.depth = self.get_ancestors(node, count=True)

CLASSDEF

CLASSDEF(node: AST) -> None

Subclass override

Source code in src/mvdef/core/check.py
def CLASSDEF(self, node: AST) -> None:
    """Subclass override: record each class definition node during the walk."""
    super().CLASSDEF(node=node)
    for bucket in (self.classdefs, self.alldefs):
        bucket.append(node)

FUNCTIONDEF

FUNCTIONDEF(node: AST) -> None

Subclass override

Source code in src/mvdef/core/check.py
def FUNCTIONDEF(self, node: AST) -> None:
    """Subclass override: record each function definition node during the walk."""
    super().FUNCTIONDEF(node=node)
    for bucket in (self.funcdefs, self.alldefs):
        bucket.append(node)

unused_imports cached

unused_imports() -> list[UnusedImport]

Import strings (in m.message_args[0] for each UnusedImport message m):

  • "import a( as o)" -> "a( as o)"
  • "from a import b( as o)" -> "a.b( as o)"
  • "from a.b import c( as o)" -> "a.b.c( as o)"
  • "from . import a( as o)" -> ".a( as o)"
  • "from .a import b( as o)" -> ".a.b( as o)"
  • "from .a.b import c( as o)" -> ".a.b.c( as o)"
Source code in src/mvdef/core/check.py
@cache
def unused_imports(self) -> list[UnusedImport]:
    """
    Import strings (in `m.message_args[0]` for each `UnusedImport` message `m`):

    - "import a( as o)"           -> "a( as o)"
    - "from a import b( as o)"    -> "a.b( as o)"
    - "from a.b import c( as o)"  -> "a.b.c( as o)"
    - "from . import a( as o)"    -> ".a( as o)"
    - "from .a import b( as o)"   -> ".a.b( as o)"
    - "from .a.b import c( as o)" -> ".a.b.c( as o)"

    NOTE(review): `@cache` on an instance method keeps every instance alive for
    the cache's lifetime (ruff B019) — consider `cached_property` if the call
    syntax can change.
    """
    return [msg for msg in self.messages if isinstance(msg, UnusedImport)]

diff

Diff two files (in a way that can be applied as a patch).

Differ dataclass

Differ(src: Path, *, mv: list[str], source_ref: Checker, dst: Path | None = None, dest_ref: Checker | None = None, escalate: bool = False, verbose: bool = False)

Dataclass storing the source (and potentially destination) of symbols.

Importantly, the symbol names are stored as a list mv.

is_src property

is_src: bool

Check if the dst is None (indicating this is a source of symbols).

target_file property

target_file: Path

The src or dst depending on is_src.

old_code property

old_code: str

The source of the source or dest ref, depending on if is_src.

populate_agenda

populate_agenda() -> None

Call Agenda.remove or Agenda.bring with the mv list.

Source code in src/mvdef/core/diff.py
def populate_agenda(self) -> None:
    """Call `Agenda.remove` (src side) or `Agenda.bring` (dst side) with the `mv` list."""
    if not self.is_src:
        self.agenda.bring(self.mv, src=self.src, dst=self.dst)
    else:
        self.agenda.remove(self.mv, src=self.src)

unidiff

unidiff() -> str

The unified diff of the agenda (if agenda is empty, populates it first).

Source code in src/mvdef/core/diff.py
def unidiff(self) -> str:
    """Return the agenda's unified diff, populating the agenda first when empty."""
    agenda = self.agenda
    if agenda.empty:
        self.populate_agenda()
    return agenda.unidiff(target_file=self.target_file, is_src=self.is_src)

execute

execute() -> None

See autoflake8, which uses rename (replace) with NamedTemporaryFile: https://github.com/fsouza/autoflake8/blob/main/autoflake8/fix.py#L668

Also autoflake, which doesn't: https://github.com/PyCQA/autoflake/blob/main/autoflake.py#L970

Source code in src/mvdef/core/diff.py
def execute(self) -> None:
    """
    Write the simulated result over the target file via a temp file and an
    atomic replace.

    See autoflake8, which uses rename (replace) with NamedTemporaryFile:
    https://github.com/fsouza/autoflake8/blob/main/autoflake8/fix.py#L668

    Also autoflake, which doesn't:
    https://github.com/PyCQA/autoflake/blob/main/autoflake.py#L970
    """
    if self.agenda.empty:
        self.populate_agenda()
    before = self.old_code
    after = self.agenda.simulate(input_text=before)
    # Write a sibling temp file first so the final swap into place is atomic
    with NamedTemporaryFile(delete=False, dir=self.target_file.parent) as output:
        tmp_path = Path(output.name)
        tmp_path.write_text(after)
    # Path.replace, not Path.rename: rename raises FileExistsError on Windows
    # when the target already exists (which it normally does here)
    tmp_path.replace(self.target_file)
    return

manifest

manifest

Manifest dataclass

Manifest(src: Path, *, matchers: list[str], source_ref: Checker, dry_run: bool, list: bool, escalate: bool = False, verbose: bool = False)
populate_agenda
populate_agenda() -> None

Puts matchers (default ['*']) on Agenda.targeted.doc without regex expansion.

Source code in src/mvdef/core/manifest/manifest.py
def populate_agenda(self) -> None:
    """Put `matchers` (default `['*']`) on `Agenda.targeted.doc`, without regex expansion."""
    self.agenda.document(self.matchers, src=self.src)

parse

parse

parse(codestring, *, file: str | Path = '', verbose: bool = False, **kwargs) -> Checker | None

kwargs::{escalate: bool = False, target_cls: bool = False, target_all: bool = False}

Source code in src/mvdef/core/parse.py
def parse(
    codestring,
    *,
    file: str | Path = "",
    verbose: bool = False,
    **kwargs,
) -> Checker | None:
    """
    kwargs::{escalate: bool = False, target_cls: bool = False, target_all: bool = False}
    """
    report = reporter._makeDefaultReporter()
    filename = str(file)
    try:
        tree = ast.parse(codestring, filename=filename)
    except SyntaxError as err:
        report.syntaxError(filename, err.args[0], err.lineno, err.offset, err.text)
        if kwargs.get("escalate", False):
            raise
        return None
    except Exception:
        report.unexpectedError(filename, "problem decoding source")
        if kwargs.get("escalate", False):
            raise
        return None
    # Parsed cleanly: run the (subclassed) pyflakes Checker over the tree
    checker = Checker(tree, code=codestring, filename=filename, verbose=verbose, **kwargs)
    checker.messages.sort(key=lambda m: m.lineno)
    if verbose:
        for m in checker.messages:
            print(
                f"• {type(m).__name__} {list(m.message_args)}",
                f"L{m.lineno} col {m.col} in {filename or 'STDIN'}",
            )
    return checker

reparse

reparse(check: Checker, input_text: str) -> Checker

Parse new text with the settings from an existing parsed result (a Checker object).

Create a new Checker with the same settings as the current instance, but change the input file contents (equivalent to overwriting the file and parsing it again).

Source code in src/mvdef/core/parse.py
def reparse(check: Checker, input_text: str) -> Checker:
    """
    Parse new text with the settings from an existing parsed result (a `Checker` object).

    Create a new Checker with the same settings as the current instance, but change
    the input file contents (equivalent to overwriting the file and parsing it again).

    :param check: the previously-parsed Checker whose settings are reused.
    :param input_text: the replacement file contents to parse.
    """
    check_settings = {
        "verbose": check.verbose,
        "escalate": check.escalate,
        "cls_defs": check.target_cls,
        "func_defs": check.target_func,
    }
    # (commented-out debug guard removed)
    return parse(input_text, file=check.filename, **check_settings)