Numexpr Parser

This module provides the Lark-based parser for numexpr expressions. The listing below is a generated API reference of the parser, lexer, and tree-construction classes it exposes.

class formulate.numexpr_parser.Action(name)

Bases: object

class formulate.numexpr_parser.AmbiguousExpander(to_expand, tree_class, node_builder)

Bases: object

class formulate.numexpr_parser.AmbiguousIntermediateExpander(tree_class, node_builder)

Bases: object

class formulate.numexpr_parser.BasicLexer(conf: LexerConf, comparator=None)

Bases: Lexer

callback: dict[str, Callable[[Token], Token]]
ignore_types: frozenset[str]
lex(state: LexerState, parser_state: Any) Iterator[Token]
match(text, pos)
newline_types: frozenset[str]
next_token(lex_state: LexerState, parser_state: Any = None) Token
re: ModuleType
property scanner
terminals: Collection[TerminalDef]
user_callbacks: dict[str, Callable[[Token], Token]]
class formulate.numexpr_parser.CallChain(callback1, callback2, cond)

Bases: object

class formulate.numexpr_parser.ChildFilter(to_include, append_none, node_builder)

Bases: object

class formulate.numexpr_parser.ChildFilterLALR(to_include, append_none, node_builder)

Bases: ChildFilter

class formulate.numexpr_parser.ChildFilterLALR_NoPlaceholders(to_include, node_builder)

Bases: ChildFilter

exception formulate.numexpr_parser.ConfigurationError

Bases: LarkError, ValueError

class formulate.numexpr_parser.ContextualLexer(conf: LexerConf, states: dict[str, Collection[str]], always_accept: Collection[str] = ())

Bases: Lexer

lex(lexer_state: LexerState, parser_state: Any) Iterator[Token]
lexers: dict[str, BasicLexer]
root_lexer: BasicLexer
exception formulate.numexpr_parser.DedentError

Bases: LarkError

class formulate.numexpr_parser.ExpandSingleChild(node_builder)

Bases: object

exception formulate.numexpr_parser.GrammarError

Bases: LarkError

class formulate.numexpr_parser.Indenter

Bases: PostLex, ABC

abstract property CLOSE_PAREN_types: list[str]
abstract property DEDENT_type: str
abstract property INDENT_type: str
abstract property NL_type: str
abstract property OPEN_PAREN_types: list[str]
property always_accept

Built-in immutable sequence.

If no argument is given, the constructor returns an empty tuple. If iterable is specified the tuple is initialized from iterable’s items.

If the argument is a tuple, the return value is the same object.

handle_NL(token: Token) Iterator[Token]
indent_level: list[int]
paren_level: int
process(stream)
abstract property tab_len: int
class formulate.numexpr_parser.InlineTransformer(visit_tokens: bool = True)

Bases: Transformer

class formulate.numexpr_parser.IntParseTable(states, start_states, end_states)

Bases: ParseTable

classmethod from_ParseTable(parse_table)
class formulate.numexpr_parser.Interpreter

Bases: _Decoratable, ABC, Generic[_Leaf_T, _Return_T]

visit(tree: Tree[_Leaf_T]) _Return_T
visit_children(tree: Tree[_Leaf_T]) list
class formulate.numexpr_parser.LALR_Parser(parser_conf, debug=False, strict=False)

Bases: Serialize

classmethod deserialize(data, memo, callbacks, debug=False)
parse(lexer, start, on_error=None)
parse_interactive(lexer, start)
serialize(memo: Any = None) dict[str, Any]
class formulate.numexpr_parser.Lark(grammar: Grammar | str | IO[str], **options)

Bases: Serialize

get_terminal(name: str) TerminalDef
grammar: Grammar
lex(text: str, dont_ignore: bool = False) Iterator[Token]
lexer: Lexer
classmethod load(f) _T
classmethod open(grammar_filename: str, rel_to: str | None = None, **options) _T
classmethod open_from_package(package: str, grammar_path: str, search_paths: Sequence[str] = [''], **options) _T
options: LarkOptions
parse(text: str, start: str | None = None, on_error: Callable[[UnexpectedInput], bool] | None = None) ParseTree
parse_interactive(text: str | None = None, start: str | None = None) InteractiveParser
parser: ParsingFrontend
save(f, exclude_options: Collection[str] = ()) None
source_grammar: str
source_path: str
terminals: Collection[TerminalDef]
class formulate.numexpr_parser.LarkOptions(options_dict: dict[str, Any])

Bases: Serialize

OPTIONS_DOC = '\n    **===  General Options  ===**\n\n    start\n            The start symbol. Either a string, or a list of strings for multiple possible starts (Default: "start")\n    debug\n            Display debug information and extra warnings. Use only when debugging (Default: ``False``)\n            When used with Earley, it generates a forest graph as "sppf.png", if \'dot\' is installed.\n    strict\n            Throw an exception on any potential ambiguity, including shift/reduce conflicts, and regex collisions.\n    transformer\n            Applies the transformer to every parse tree (equivalent to applying it after the parse, but faster)\n    propagate_positions\n            Propagates positional attributes into the \'meta\' attribute of all tree branches.\n            Sets attributes: (line, column, end_line, end_column, start_pos, end_pos,\n                              container_line, container_column, container_end_line, container_end_column)\n            Accepts ``False``, ``True``, or a callable, which will filter which nodes to ignore when propagating.\n    maybe_placeholders\n            When ``True``, the ``[]`` operator returns ``None`` when not matched.\n            When ``False``,  ``[]`` behaves like the ``?`` operator, and returns no value at all.\n            (default= ``True``)\n    cache\n            Cache the results of the Lark grammar analysis, for x2 to x3 faster loading. LALR only for now.\n\n            - When ``False``, does nothing (default)\n            - When ``True``, caches to a temporary file in the local directory\n            - When given a string, caches to the path pointed by the string\n    regex\n            When True, uses the ``regex`` module instead of the stdlib ``re``.\n    g_regex_flags\n            Flags that are applied to all terminals (both regex and strings)\n    keep_all_tokens\n            Prevent the tree builder from automagically removing "punctuation" tokens (Default: ``False``)\n    tree_class\n            Lark will produce trees comprised of instances of this class instead of the default ``lark.Tree``.\n\n    **=== Algorithm Options ===**\n\n    parser\n            Decides which parser engine to use. Accepts "earley" or "lalr". (Default: "earley").\n            (there is also a "cyk" option for legacy)\n    lexer\n            Decides whether or not to use a lexer stage\n\n            - "auto" (default): Choose for me based on the parser\n            - "basic": Use a basic lexer\n            - "contextual": Stronger lexer (only works with parser="lalr")\n            - "dynamic": Flexible and powerful (only with parser="earley")\n            - "dynamic_complete": Same as dynamic, but tries *every* variation of tokenizing possible.\n    ambiguity\n            Decides how to handle ambiguity in the parse. Only relevant if parser="earley"\n\n            - "resolve": The parser will automatically choose the simplest derivation\n              (it chooses consistently: greedy for tokens, non-greedy for rules)\n            - "explicit": The parser will return all derivations wrapped in "_ambig" tree nodes (i.e. a forest).\n            - "forest": The parser will return the root of the shared packed parse forest.\n\n    **=== Misc. / Domain Specific Options ===**\n\n    postlex\n            Lexer post-processing (Default: ``None``) Only works with the basic and contextual lexers.\n    priority\n            How priorities should be evaluated - "auto", ``None``, "normal", "invert" (Default: "auto")\n    lexer_callbacks\n            Dictionary of callbacks for the lexer. May alter tokens during lexing. Use with caution.\n    use_bytes\n            Accept an input of type ``bytes`` instead of ``str``.\n    edit_terminals\n            A callback for editing the terminals before parse.\n    import_paths\n            A List of either paths or loader functions to specify from where grammars are imported\n    source_path\n            Override the source of from where the grammar was loaded. Useful for relative imports and unconventional grammar loading\n    **=== End of Options ===**\n    '
ambiguity: Literal['auto', 'resolve', 'explicit', 'forest']
cache: bool | str
debug: bool
classmethod deserialize(data: dict[str, Any], memo: dict[int, TerminalDef | Rule]) LarkOptions
edit_terminals: Callable[[TerminalDef], TerminalDef] | None
g_regex_flags: int
import_paths: list[str | Callable[[None | str | PackageResource, str], tuple[str, str]]]
keep_all_tokens: bool
lexer: _LexerArgType
lexer_callbacks: dict[str, Callable[[Token], Token]]
maybe_placeholders: bool
parser: _ParserArgType
postlex: PostLex | None
priority: Literal['auto', 'normal', 'invert'] | None
propagate_positions: bool | str
regex: bool
serialize(memo=None) dict[str, Any]
source_path: str | None
start: list[str]
strict: bool
transformer: Transformer | None
tree_class: Any
use_bytes: bool
formulate.numexpr_parser.Lark_StandAlone(**kwargs)
exception formulate.numexpr_parser.LexError

Bases: LarkError

class formulate.numexpr_parser.Lexer

Bases: ABC

abstractmethod lex(lexer_state: LexerState, parser_state: Any) Iterator[Token]
make_lexer_state(text)
class formulate.numexpr_parser.LexerConf(terminals: Collection[TerminalDef], re_module: ModuleType, ignore: Collection[str] = (), postlex: PostLex | None = None, callbacks: dict[str, Callable[[Token], Token]] | None = None, g_regex_flags: int = 0, skip_validation: bool = False, use_bytes: bool = False, strict: bool = False)

Bases: Serialize

callbacks: dict[str, _Callback]
g_regex_flags: int
ignore: Collection[str]
lexer_type: _LexerArgType | None
postlex: PostLex | None
re_module: ModuleType
skip_validation: bool
strict: bool
terminals: Collection[TerminalDef]
use_bytes: bool
class formulate.numexpr_parser.LexerState(text: str, line_ctr: LineCounter | None = None, last_token: Token | None = None)

Bases: object

last_token: Token | None
line_ctr: LineCounter
text: str
class formulate.numexpr_parser.LexerThread(lexer: Lexer, lexer_state: LexerState)

Bases: object

classmethod from_text(lexer: Lexer, text: str)
lex(parser_state)
class formulate.numexpr_parser.LineCounter(newline_char)

Bases: object

char_pos
column
feed(token: Token, test_newline=True)
line
line_start_pos
newline_char
class formulate.numexpr_parser.Meta

Bases: object

column: int
empty: bool
end_column: int
end_line: int
end_pos: int
line: int
match_tree: bool
orig_expansion: list[TerminalDef]
start_pos: int
exception formulate.numexpr_parser.MissingVariableError

Bases: LarkError

class formulate.numexpr_parser.NonTerminal(name: str)

Bases: Symbol

is_term: ClassVar[bool] = False
name: str
class formulate.numexpr_parser.ParseConf(parse_table, callbacks, start)

Bases: object

callbacks
end_state
parse_table
start
start_state
states
class formulate.numexpr_parser.ParseTable(states, start_states, end_states)

Bases: object

classmethod deserialize(data, memo)
serialize(memo)
class formulate.numexpr_parser.ParseTreeBuilder(rules, tree_class, propagate_positions=False, ambiguous=False, maybe_placeholders=False)

Bases: object

create_callback(transformer=None)
class formulate.numexpr_parser.ParserConf(rules, callbacks, start)

Bases: Serialize

class formulate.numexpr_parser.ParserState(parse_conf, lexer, state_stack=None, value_stack=None)

Bases: object

copy()
feed_token(token, is_end=False)
lexer
parse_conf
property position
state_stack
value_stack
class formulate.numexpr_parser.ParsingFrontend(lexer_conf: LexerConf, parser_conf: ParserConf, options, parser=None)

Bases: Serialize

lexer_conf: LexerConf
options: Any
parse(text: str, start=None, on_error=None)
parse_interactive(text: str | None = None, start=None)
parser_conf: ParserConf
class formulate.numexpr_parser.Pattern(value: str, flags: Collection[str] = (), raw: str | None = None)

Bases: Serialize, ABC

flags: Collection[str]
abstract property max_width: int
abstract property min_width: int
raw: str | None
abstractmethod to_regexp() str
type: ClassVar[str]
value: str
class formulate.numexpr_parser.PatternRE(value: str, flags: Collection[str] = (), raw: str | None = None)

Bases: Pattern

property max_width: int
property min_width: int
to_regexp() str
type: ClassVar[str] = 're'
class formulate.numexpr_parser.PatternStr(value: str, flags: Collection[str] = (), raw: str | None = None)

Bases: Pattern

property max_width: int
property min_width: int
to_regexp() str
type: ClassVar[str] = 'str'
class formulate.numexpr_parser.PostLex

Bases: ABC

always_accept: Iterable[str] = ()
abstractmethod process(stream: Iterator[Token]) Iterator[Token]
class formulate.numexpr_parser.PostLexConnector(lexer, postlexer)

Bases: object

lex(lexer_state, parser_state)
class formulate.numexpr_parser.PropagatePositions(node_builder, node_filter=None)

Bases: object

class formulate.numexpr_parser.PythonIndenter

Bases: Indenter

CLOSE_PAREN_types = ['RPAR', 'RSQB', 'RBRACE']
DEDENT_type = '_DEDENT'
INDENT_type = '_INDENT'
NL_type = '_NEWLINE'
OPEN_PAREN_types = ['LPAR', 'LSQB', 'LBRACE']
tab_len = 8
class formulate.numexpr_parser.Rule(origin, expansion, order=0, alias=None, options=None)

Bases: Serialize

alias
expansion
options
order
origin
class formulate.numexpr_parser.RuleOptions(keep_all_tokens: bool = False, expand1: bool = False, priority: int | None = None, template_source: str | None = None, empty_indices: tuple[bool, ...] = ())

Bases: Serialize

empty_indices: tuple[bool, ...]
expand1: bool
keep_all_tokens: bool
priority: int | None
template_source: str | None
class formulate.numexpr_parser.Scanner(terminals, g_regex_flags, re_, use_bytes, match_whole=False)

Bases: object

match(text, pos)
class formulate.numexpr_parser.Serialize

Bases: object

classmethod deserialize(data: dict[str, Any], memo: dict[int, Any]) _T
memo_serialize(types_to_memoize: list) Any
serialize(memo=None) dict[str, Any]
class formulate.numexpr_parser.SerializeMemoizer(types_to_memoize: list)

Bases: Serialize

classmethod deserialize(data: dict[int, Any], namespace: dict[str, Any], memo: dict[Any, Any]) dict[int, Any]
in_types(value: Serialize) bool
serialize() dict[int, Any]
class formulate.numexpr_parser.Symbol(name: str)

Bases: Serialize

property fullrepr
is_term: ClassVar[bool] = NotImplemented
name: str
renamed(f)
class formulate.numexpr_parser.Terminal(name, filter_out=False)

Bases: Symbol

property fullrepr
is_term: ClassVar[bool] = True
name: str
renamed(f)
class formulate.numexpr_parser.TerminalDef(name: str, pattern: Pattern, priority: int = 0)

Bases: Serialize

name: str
pattern: Pattern
priority: int
user_repr() str
class formulate.numexpr_parser.Token(type: str, value: Any, start_pos: int | None = None, line: int | None = None, column: int | None = None, end_line: int | None = None, end_column: int | None = None, end_pos: int | None = None)
class formulate.numexpr_parser.Token(type_: str, value: Any, start_pos: int | None = None, line: int | None = None, column: int | None = None, end_line: int | None = None, end_column: int | None = None, end_pos: int | None = None)

Bases: str

column: int | None
end_column: int | None
end_line: int | None
end_pos: int | None
line: int | None
classmethod new_borrow_pos(type_: str, value: Any, borrow_t: Token) _T
start_pos: int | None
type: str
update(type: str | None = None, value: Any | None = None) Token
update(type_: str | None = None, value: Any | None = None) Token
value: Any
class formulate.numexpr_parser.Transformer(visit_tokens: bool = True)

Bases: _Decoratable, ABC, Generic[_Leaf_T, _Return_T]

transform(tree: Tree[_Leaf_T]) _Return_T
class formulate.numexpr_parser.TransformerChain(*transformers: Transformer | TransformerChain)

Bases: Generic[_Leaf_T, _Return_T]

transform(tree: Tree[_Leaf_T]) _Return_T
transformers: tuple[Transformer | TransformerChain, ...]
class formulate.numexpr_parser.Transformer_InPlace(visit_tokens: bool = True)

Bases: Transformer

transform(tree: Tree[_Leaf_T]) _Return_T
class formulate.numexpr_parser.Transformer_InPlaceRecursive(visit_tokens: bool = True)

Bases: Transformer

class formulate.numexpr_parser.Transformer_NonRecursive(visit_tokens: bool = True)

Bases: Transformer

transform(tree: Tree[_Leaf_T]) _Return_T
class formulate.numexpr_parser.Tree(data: str, children: list[_Leaf_T | Tree[_Leaf_T]], meta: Meta | None = None)

Bases: Generic[_Leaf_T], numexprnode

children: list[_Leaf_T | Tree[_Leaf_T]]
data: str
find_data(data: str) Iterator[Tree[_Leaf_T]]
find_pred(pred: Callable[[Tree[_Leaf_T]], bool]) Iterator[Tree[_Leaf_T]]
iter_subtrees() Iterator[Tree[_Leaf_T]]
iter_subtrees_topdown()
property meta: Meta
pretty(indent_str: str = '  ') str
exception formulate.numexpr_parser.UnexpectedCharacters(seq, lex_pos, line, column, allowed=None, considered_tokens=None, state=None, token_history=None, terminals_by_name=None, considered_rules=None)

Bases: LexError, UnexpectedInput

allowed: set[str]
considered_tokens: set[Any]
exception formulate.numexpr_parser.UnexpectedEOF(expected, state=None, terminals_by_name=None)

Bases: ParseError, UnexpectedInput

expected: list[Token]
exception formulate.numexpr_parser.UnexpectedInput

Bases: LarkError

column: int
get_context(text: str, span: int = 40) str
line: int
match_examples(parse_fn: Callable[[str], Tree], examples: Mapping[T, Iterable[str]] | Iterable[tuple[T, Iterable[str]]], token_type_match_fallback: bool = False, use_accepts: bool = True) T | None
pos_in_stream = None
state: Any
exception formulate.numexpr_parser.UnexpectedToken(token, expected, considered_rules=None, state=None, interactive_parser=None, terminals_by_name=None, token_history=None)

Bases: ParseError, UnexpectedInput

property accepts: set[str]
considered_rules: set[str]
expected: set[str]
interactive_parser: InteractiveParser
class formulate.numexpr_parser.UnlessCallback(scanner)

Bases: object

exception formulate.numexpr_parser.VisitError(rule, obj, orig_exc)

Bases: LarkError

obj: Tree | Token
orig_exc: Exception
class formulate.numexpr_parser.Visitor

Bases: VisitorBase, ABC, Generic[_Leaf_T]

visit(tree: Tree[_Leaf_T]) Tree[_Leaf_T]
visit_topdown(tree: Tree[_Leaf_T]) Tree[_Leaf_T]
class formulate.numexpr_parser.VisitorBase

Bases: object

class formulate.numexpr_parser.Visitor_Recursive

Bases: VisitorBase, Generic[_Leaf_T]

visit(tree: Tree[_Leaf_T]) Tree[_Leaf_T]
visit_topdown(tree: Tree[_Leaf_T]) Tree[_Leaf_T]
formulate.numexpr_parser.apply_visit_wrapper(func, name, wrapper)
formulate.numexpr_parser.assert_config(value, options: Collection, msg='Got %r, expected one of %s')
formulate.numexpr_parser.classify(seq: Iterable, key: Callable | None = None, value: Callable | None = None) dict
formulate.numexpr_parser.create_basic_lexer(lexer_conf, parser, postlex, options) BasicLexer
formulate.numexpr_parser.create_contextual_lexer(lexer_conf: LexerConf, parser, postlex, options) ContextualLexer
formulate.numexpr_parser.create_lalr_parser(lexer_conf: LexerConf, parser_conf: ParserConf, options=None) LALR_Parser
formulate.numexpr_parser.get_regexp_width(expr: str) tuple[int, int] | list[int]
formulate.numexpr_parser.inplace_transformer(func)
formulate.numexpr_parser.make_propagate_positions(option)
formulate.numexpr_parser.maybe_create_ambiguous_expander(tree_class, expansion, keep_all_tokens)
formulate.numexpr_parser.maybe_create_child_filter(expansion, keep_all_tokens, ambiguous, _empty_indices: list[bool])
formulate.numexpr_parser.merge_transformers(base_transformer=None, **transformers_to_merge)
class formulate.numexpr_parser.numexprnode

Bases: object

formulate.numexpr_parser.v_args(inline: bool = False, meta: bool = False, tree: bool = False, wrapper: Callable | None = None) Callable[[Callable[[...], _Return_T] | type], Callable[[...], _Return_T] | type]
formulate.numexpr_parser.visit_children_decor(func: Callable[[type[Interpreter], _Return_T], _R]) Callable[[type[Interpreter], _Return_T], _R]