Coverage for /home/tbone/.local/share/hatch/env/virtual/importnb-aVRh-lqt/testrelease.interactive/lib/python3.9/site-packages/importnb/_json_parser.py: 53%

1875 statements  

coverage.py v6.5.0, created at 2022-10-03 14:24 -0700

1# The file was automatically generated by Lark v1.1.2 

2__version__ = "1.1.2" 

3 

4# 

5# 

6# Lark Stand-alone Generator Tool 

7# ---------------------------------- 

8# Generates a stand-alone LALR(1) parser 

9# 

10# Git: https://github.com/erezsh/lark 

11# Author: Erez Shinan (erezshin@gmail.com) 

12# 

13# 

14# >>> LICENSE 

15# 

16# This tool and its generated code use a separate license from Lark, 

17# and are subject to the terms of the Mozilla Public License, v. 2.0. 

18# If a copy of the MPL was not distributed with this 

19# file, You can obtain one at https://mozilla.org/MPL/2.0/. 

20# 

21# If you wish to purchase a commercial license for this tool and its 

22# generated code, you may contact me via email or otherwise. 

23# 

24# If MPL2 is incompatible with your free or open-source project, 

25# contact me and we'll work it out. 

26# 

27# 

28 

29from abc import ABC, abstractmethod 

30from collections.abc import Sequence 

31from types import ModuleType 

32from typing import ( 

33 TypeVar, Generic, Type, Tuple, List, Dict, Iterator, Collection, Callable, Optional, FrozenSet, Any, 

34 Union, Iterable, IO, TYPE_CHECKING, cast, ## 'cast' added: used by TransformerChain.transform and Transformer_NonRecursive.transform below but missing from this import 

35 Pattern as REPattern, ClassVar, Set, Mapping 

36) 

37 

38 

39class LarkError(Exception): 

40 pass 

41 

42 

43class ConfigurationError(LarkError, ValueError): 

44 pass 

45 

46 

47def assert_config(value, options: Collection, msg='Got %r, expected one of %s'): 

48 if value not in options: 

49 raise ConfigurationError(msg % (value, options)) 

50 

51 

52class GrammarError(LarkError): 

53 pass 

54 

55 

56class ParseError(LarkError): 

57 pass 

58 

59 

60class LexError(LarkError): 

61 pass 

62 

63T = TypeVar('T') 

64 

65class UnexpectedInput(LarkError): 

66 #-- 
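# (docstring stripped by the standalone generator) Common base of the
# Unexpected* exceptions below: records where lexing/parsing failed and
# provides the get_context() and match_examples() helpers.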

67 line: int 

68 column: int 

69 pos_in_stream = None 

70 state: Any 

71 _terminals_by_name = None 

72 

73 def get_context(self, text: str, span: int=40) -> str: 

74 #-- 

75 assert self.pos_in_stream is not None, self 

76 pos = self.pos_in_stream 

77 start = max(pos - span, 0) 

78 end = pos + span 

79 if not isinstance(text, bytes): 

80 before = text[start:pos].rsplit('\n', 1)[-1] 

81 after = text[pos:end].split('\n', 1)[0] 

82 return before + after + '\n' + ' ' * len(before.expandtabs()) + '^\n' 

83 else: 

84 before = text[start:pos].rsplit(b'\n', 1)[-1] 

85 after = text[pos:end].split(b'\n', 1)[0] 

86 return (before + after + b'\n' + b' ' * len(before.expandtabs()) + b'^\n').decode("ascii", "backslashreplace") 
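# e.g. for text = '{"a": !}' failing at the '!', get_context(text) returns
# roughly '{"a": !}\n      ^\n' -- the offending line plus a caret marker under
# the failure position.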

87 

88 def match_examples(self, parse_fn: 'Callable[[str], Tree]', 

89 examples: Union[Mapping[T, Iterable[str]], Iterable[Tuple[T, Iterable[str]]]], 

90 token_type_match_fallback: bool=False, 

91 use_accepts: bool=True 

92 ) -> Optional[T]: 

93 #-- 
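# (docstring stripped) Re-parses labeled collections of known-bad inputs and
# returns the label whose failure looks most like this one: an exact token
# match wins, then a token-type fallback, then merely sharing a parser state.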

94 assert self.state is not None, "Not supported for this exception" 

95 

96 if isinstance(examples, Mapping): 

97 examples = examples.items() 

98 

99 candidate = (None, False) 

100 for i, (label, example) in enumerate(examples): 

101 assert not isinstance(example, str), "Expecting a list" 

102 

103 for j, malformed in enumerate(example): 

104 try: 

105 parse_fn(malformed) 

106 except UnexpectedInput as ut: 

107 if ut.state == self.state: 

108 if ( 

109 use_accepts 

110 and isinstance(self, UnexpectedToken) 

111 and isinstance(ut, UnexpectedToken) 

112 and ut.accepts != self.accepts 

113 ): 

114 logger.debug("Different accepts with same state[%d]: %s != %s at example [%s][%s]" % 

115 (self.state, self.accepts, ut.accepts, i, j)) 

116 continue 

117 if ( 

118 isinstance(self, (UnexpectedToken, UnexpectedEOF)) 

119 and isinstance(ut, (UnexpectedToken, UnexpectedEOF)) 

120 ): 

121 if ut.token == self.token: ## 

122 

123 logger.debug("Exact Match at example [%s][%s]" % (i, j)) 

124 return label 

125 

126 if token_type_match_fallback: 

127 ## 

128 

129 if (ut.token.type == self.token.type) and not candidate[-1]: 

130 logger.debug("Token Type Fallback at example [%s][%s]" % (i, j)) 

131 candidate = label, True 

132 

133 if candidate[0] is None: 

134 logger.debug("Same State match at example [%s][%s]" % (i, j)) 

135 candidate = label, False 

136 

137 return candidate[0] 

138 

139 def _format_expected(self, expected): 

140 if self._terminals_by_name: 

141 d = self._terminals_by_name 

142 expected = [d[t_name].user_repr() if t_name in d else t_name for t_name in expected] 

143 return "Expected one of: \n\t* %s\n" % '\n\t* '.join(expected) 

144 

145 

146class UnexpectedEOF(ParseError, UnexpectedInput): 

147 #-- 

148 expected: 'List[Token]' 

149 

150 def __init__(self, expected, state=None, terminals_by_name=None): 

151 super(UnexpectedEOF, self).__init__() 

152 

153 self.expected = expected 

154 self.state = state 

155 ## the original 'from .lexer import Token' is dropped here: a stand-alone module has no '.lexer' submodule, and Token is defined at module level further down in this file 

156 self.token = Token("<EOF>", "") ## 

157 

158 self.pos_in_stream = -1 

159 self.line = -1 

160 self.column = -1 

161 self._terminals_by_name = terminals_by_name 

162 

163 

164 def __str__(self): 

165 message = "Unexpected end-of-input. " 

166 message += self._format_expected(self.expected) 

167 return message 

168 

169 

170class UnexpectedCharacters(LexError, UnexpectedInput): 

171 #-- 

172 

173 allowed: Set[str] 

174 considered_tokens: Set[Any] 

175 

176 def __init__(self, seq, lex_pos, line, column, allowed=None, considered_tokens=None, state=None, token_history=None, 

177 terminals_by_name=None, considered_rules=None): 

178 super(UnexpectedCharacters, self).__init__() 

179 

180 ## 

181 

182 self.line = line 

183 self.column = column 

184 self.pos_in_stream = lex_pos 

185 self.state = state 

186 self._terminals_by_name = terminals_by_name 

187 

188 self.allowed = allowed 

189 self.considered_tokens = considered_tokens 

190 self.considered_rules = considered_rules 

191 self.token_history = token_history 

192 

193 if isinstance(seq, bytes): 

194 self.char = seq[lex_pos:lex_pos + 1].decode("ascii", "backslashreplace") 

195 else: 

196 self.char = seq[lex_pos] 

197 self._context = self.get_context(seq) 

198 

199 

200 def __str__(self): 

201 message = "No terminal matches '%s' in the current parser context, at line %d col %d" % (self.char, self.line, self.column) 

202 message += '\n\n' + self._context 

203 if self.allowed: 

204 message += self._format_expected(self.allowed) 

205 if self.token_history: 

206 message += '\nPrevious tokens: %s\n' % ', '.join(repr(t) for t in self.token_history) 

207 return message 

208 

209 

210class UnexpectedToken(ParseError, UnexpectedInput): 

211 #-- 

212 

213 expected: Set[str] 

214 considered_rules: Set[str] 

215 interactive_parser: 'InteractiveParser' 

216 

217 def __init__(self, token, expected, considered_rules=None, state=None, interactive_parser=None, terminals_by_name=None, token_history=None): 

218 super(UnexpectedToken, self).__init__() 

219 

220 ## 

221 

222 self.line = getattr(token, 'line', '?') 

223 self.column = getattr(token, 'column', '?') 

224 self.pos_in_stream = getattr(token, 'start_pos', None) 

225 self.state = state 

226 

227 self.token = token 

228 self.expected = expected ## 

229 

230 self._accepts = NO_VALUE 

231 self.considered_rules = considered_rules 

232 self.interactive_parser = interactive_parser 

233 self._terminals_by_name = terminals_by_name 

234 self.token_history = token_history 

235 

236 

237 @property 

238 def accepts(self) -> Set[str]: 

239 if self._accepts is NO_VALUE: 

240 self._accepts = self.interactive_parser and self.interactive_parser.accepts() 

241 return self._accepts 

242 

243 def __str__(self): 

244 message = ("Unexpected token %r at line %s, column %s.\n%s" 

245 % (self.token, self.line, self.column, self._format_expected(self.accepts or self.expected))) 

246 if self.token_history: 

247 message += "Previous tokens: %r\n" % self.token_history 

248 

249 return message 

250 

251 

252 

253class VisitError(LarkError): 

254 #-- 

255 

256 obj: 'Union[Tree, Token]' 

257 orig_exc: Exception 

258 

259 def __init__(self, rule, obj, orig_exc): 

260 message = 'Error trying to process rule "%s":\n\n%s' % (rule, orig_exc) 

261 super(VisitError, self).__init__(message) 

262 

263 self.rule = rule 

264 self.obj = obj 

265 self.orig_exc = orig_exc 

266 

267 

268class MissingVariableError(LarkError): 

269 pass 

270 

271 

272import sys, re 

273import logging 

274logger: logging.Logger = logging.getLogger("lark") 

275logger.addHandler(logging.StreamHandler()) 

276## 

277 

278## 

279 

280logger.setLevel(logging.CRITICAL) 

281 

282 

283NO_VALUE = object() 

284 

285 

286def classify(seq, key=None, value=None): 

287 d = {} 

288 for item in seq: 

289 k = key(item) if (key is not None) else item 

290 v = value(item) if (value is not None) else item 

291 if k in d: 

292 d[k].append(v) 

293 else: 

294 d[k] = [v] 

295 return d 
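# e.g. classify('abca') -> {'a': ['a', 'a'], 'b': ['b'], 'c': ['c']}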

296 

297 

298def _deserialize(data, namespace, memo): 

299 if isinstance(data, dict): 

300 if '__type__' in data: ## 

301 

302 class_ = namespace[data['__type__']] 

303 return class_.deserialize(data, memo) 

304 elif '@' in data: 

305 return memo[data['@']] 

306 return {key:_deserialize(value, namespace, memo) for key, value in data.items()} 

307 elif isinstance(data, list): 

308 return [_deserialize(value, namespace, memo) for value in data] 

309 return data 

310 

311 

312class Serialize: 

313 #-- 
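# (docstring stripped) Safe-serialization mixin: subclasses list the attributes
# to round-trip in __serialize_fields__, plus any nested Serialize classes in
# __serialize_namespace__ so deserialize() can look them up by '__type__'.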

314 

315 def memo_serialize(self, types_to_memoize): 

316 memo = SerializeMemoizer(types_to_memoize) 

317 return self.serialize(memo), memo.serialize() 

318 

319 def serialize(self, memo=None): 

320 if memo and memo.in_types(self): 

321 return {'@': memo.memoized.get(self)} 

322 

323 fields = getattr(self, '__serialize_fields__') 

324 res = {f: _serialize(getattr(self, f), memo) for f in fields} 

325 res['__type__'] = type(self).__name__ 

326 if hasattr(self, '_serialize'): 

327 self._serialize(res, memo) 

328 return res 

329 

330 @classmethod 

331 def deserialize(cls, data, memo): 

332 namespace = getattr(cls, '__serialize_namespace__', []) 

333 namespace = {c.__name__:c for c in namespace} 

334 

335 fields = getattr(cls, '__serialize_fields__') 

336 

337 if '@' in data: 

338 return memo[data['@']] 

339 

340 inst = cls.__new__(cls) 

341 for f in fields: 

342 try: 

343 setattr(inst, f, _deserialize(data[f], namespace, memo)) 

344 except KeyError as e: 

345 raise KeyError("Cannot find key for class", cls, e) 

346 

347 if hasattr(inst, '_deserialize'): 

348 inst._deserialize() 

349 

350 return inst 
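# Round-trip sketch with a hypothetical subclass (not part of this file):
#
#     class Point(Serialize):
#         __serialize_fields__ = 'x', 'y'
#
#     data = point.serialize()             # {'x': ..., 'y': ..., '__type__': 'Point'}
#     clone = Point.deserialize(data, {})  # rebuilt via cls.__new__, no __init__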

351 

352 

353class SerializeMemoizer(Serialize): 

354 #-- 

355 

356 __serialize_fields__ = 'memoized', 

357 

358 def __init__(self, types_to_memoize): 

359 self.types_to_memoize = tuple(types_to_memoize) 

360 self.memoized = Enumerator() 

361 

362 def in_types(self, value): 

363 return isinstance(value, self.types_to_memoize) 

364 

365 def serialize(self): 

366 return _serialize(self.memoized.reversed(), None) 

367 

368 @classmethod 

369 def deserialize(cls, data, namespace, memo): 

370 return _deserialize(data, namespace, memo) 

371 

372 

373try: 

374 import regex ## 

375 

376except ImportError: 

377 regex = None 

378 

379import sre_parse 

380import sre_constants 

381categ_pattern = re.compile(r'\\p{[A-Za-z_]+}') 

382 

383def get_regexp_width(expr): 

384 if regex: 

385 ## 

386 

387 ## 

388 

389 ## 

390 

391 regexp_final = re.sub(categ_pattern, 'A', expr) 

392 else: 

393 if re.search(categ_pattern, expr): 

394 raise ImportError('`regex` module must be installed in order to use Unicode categories.', expr) 

395 regexp_final = expr 

396 try: 

397 return [int(x) for x in sre_parse.parse(regexp_final).getwidth()] 

398 except sre_constants.error: 

399 if not regex: 

400 raise ValueError(expr) 

401 else: 

402 ## 

403 

404 ## 

405 

406 c = regex.compile(regexp_final) 

407 if c.match('') is None: 

408 ## 

409 

410 return 1, int(sre_constants.MAXREPEAT) 

411 else: 

412 return 0, int(sre_constants.MAXREPEAT) 
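# e.g. get_regexp_width('a+') -> [1, MAXREPEAT]. Used below to reject
# zero-width terminals and to sort terminals by maximum match length.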

413 

414 

415from collections import OrderedDict 

416 

417class Meta: 

418 

419 empty: bool 

420 line: int 

421 column: int 

422 start_pos: int 

423 end_line: int 

424 end_column: int 

425 end_pos: int 

426 orig_expansion: 'List[TerminalDef]' 

427 match_tree: bool 

428 

429 def __init__(self): 

430 self.empty = True 

431 

432 

433_Leaf_T = TypeVar("_Leaf_T") 

434Branch = Union[_Leaf_T, 'Tree[_Leaf_T]'] 

435 

436 

437class Tree(Generic[_Leaf_T]): 

438 #-- 
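# (docstring stripped) The parse-tree node: `data` holds the rule name and
# `children` a list of subtrees and tokens.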

439 

440 data: str 

441 children: 'List[Branch[_Leaf_T]]' 

442 

443 def __init__(self, data: str, children: 'List[Branch[_Leaf_T]]', meta: Optional[Meta]=None) -> None: 

444 self.data = data 

445 self.children = children 

446 self._meta = meta 

447 

448 @property 

449 def meta(self) -> Meta: 

450 if self._meta is None: 

451 self._meta = Meta() 

452 return self._meta 

453 

454 def __repr__(self): 

455 return 'Tree(%r, %r)' % (self.data, self.children) 

456 

457 def _pretty_label(self): 

458 return self.data 

459 

460 def _pretty(self, level, indent_str): 

461 if len(self.children) == 1 and not isinstance(self.children[0], Tree): 

462 return [indent_str*level, self._pretty_label(), '\t', '%s' % (self.children[0],), '\n'] 

463 

464 l = [indent_str*level, self._pretty_label(), '\n'] 

465 for n in self.children: 

466 if isinstance(n, Tree): 

467 l += n._pretty(level+1, indent_str) 

468 else: 

469 l += [indent_str*(level+1), '%s' % (n,), '\n'] 

470 

471 return l 

472 

473 def pretty(self, indent_str: str=' ') -> str: 

474 #-- 

475 return ''.join(self._pretty(0, indent_str)) 

476 

477 def __rich__(self, parent:'rich.tree.Tree'=None) -> 'rich.tree.Tree': 

478 #-- 

479 return self._rich(parent) 

480 

481 def _rich(self, parent): 

482 if parent: 

483 tree = parent.add(f'[bold]{self.data}[/bold]') 

484 else: 

485 import rich.tree 

486 tree = rich.tree.Tree(self.data) 

487 

488 for c in self.children: 

489 if isinstance(c, Tree): 

490 c._rich(tree) 

491 else: 

492 tree.add(f'[green]{c}[/green]') 

493 

494 return tree 

495 

496 def __eq__(self, other): 

497 try: 

498 return self.data == other.data and self.children == other.children 

499 except AttributeError: 

500 return False 

501 

502 def __ne__(self, other): 

503 return not (self == other) 

504 

505 def __hash__(self) -> int: 

506 return hash((self.data, tuple(self.children))) 

507 

508 def iter_subtrees(self) -> 'Iterator[Tree[_Leaf_T]]': 

509 #-- 

510 queue = [self] 

511 subtrees = OrderedDict() 

512 for subtree in queue: 

513 subtrees[id(subtree)] = subtree 

514 ## 

515 

516 queue += [c for c in reversed(subtree.children) ## 

517 

518 if isinstance(c, Tree) and id(c) not in subtrees] 

519 

520 del queue 

521 return reversed(list(subtrees.values())) 
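# Yields every subtree exactly once, with children appearing before their
# parents -- the order the bottom-up visitors below rely on.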

522 

523 def find_pred(self, pred: 'Callable[[Tree[_Leaf_T]], bool]') -> 'Iterator[Tree[_Leaf_T]]': 

524 #-- 

525 return filter(pred, self.iter_subtrees()) 

526 

527 def find_data(self, data: str) -> 'Iterator[Tree[_Leaf_T]]': 

528 #-- 

529 return self.find_pred(lambda t: t.data == data) 

530 

531 

532from functools import wraps, update_wrapper 

533from inspect import getmembers, getmro 

534 

535_Return_T = TypeVar('_Return_T') 

536_Return_V = TypeVar('_Return_V') 

537_Leaf_T = TypeVar('_Leaf_T') 

538_Leaf_U = TypeVar('_Leaf_U') 

539_R = TypeVar('_R') 

540_FUNC = Callable[..., _Return_T] 

541_DECORATED = Union[_FUNC, type] 

542 

543class _DiscardType: 

544 #-- 

545 

546 def __repr__(self): 

547 return "lark.visitors.Discard" 

548 

549Discard = _DiscardType() 

550 

551## 

552 

553 

554class _Decoratable: 

555 #-- 

556 

557 @classmethod 

558 def _apply_v_args(cls, visit_wrapper): 

559 mro = getmro(cls) 

560 assert mro[0] is cls 

561 libmembers = {name for _cls in mro[1:] for name, _ in getmembers(_cls)} 

562 for name, value in getmembers(cls): 

563 

564 ## 

565 

566 if name.startswith('_') or (name in libmembers and name not in cls.__dict__): 

567 continue 

568 if not callable(value): 

569 continue 

570 

571 ## 

572 

573 if isinstance(cls.__dict__[name], _VArgsWrapper): 

574 continue 

575 

576 setattr(cls, name, _VArgsWrapper(cls.__dict__[name], visit_wrapper)) 

577 return cls 

578 

579 def __class_getitem__(cls, _): 

580 return cls 

581 

582 

583class Transformer(_Decoratable, ABC, Generic[_Leaf_T, _Return_T]): 

584 #-- 
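# (docstring stripped) Transforms the tree bottom-up: a method named after a
# rule receives that rule's already-transformed children; methods named after
# terminals (when visit_tokens is enabled) receive the matching Token.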

585 __visit_tokens__ = True ## 

586 

587 

588 def __init__(self, visit_tokens: bool=True) -> None: 

589 self.__visit_tokens__ = visit_tokens 

590 

591 def _call_userfunc(self, tree, new_children=None): 

592 ## 

593 

594 children = new_children if new_children is not None else tree.children 

595 try: 

596 f = getattr(self, tree.data) 

597 except AttributeError: 

598 return self.__default__(tree.data, children, tree.meta) 

599 else: 

600 try: 

601 wrapper = getattr(f, 'visit_wrapper', None) 

602 if wrapper is not None: 

603 return f.visit_wrapper(f, tree.data, children, tree.meta) 

604 else: 

605 return f(children) 

606 except GrammarError: 

607 raise 

608 except Exception as e: 

609 raise VisitError(tree.data, tree, e) 

610 

611 def _call_userfunc_token(self, token): 

612 try: 

613 f = getattr(self, token.type) 

614 except AttributeError: 

615 return self.__default_token__(token) 

616 else: 

617 try: 

618 return f(token) 

619 except GrammarError: 

620 raise 

621 except Exception as e: 

622 raise VisitError(token.type, token, e) 

623 

624 def _transform_children(self, children): 

625 for c in children: 

626 if isinstance(c, Tree): 

627 res = self._transform_tree(c) 

628 elif self.__visit_tokens__ and isinstance(c, Token): 

629 res = self._call_userfunc_token(c) 

630 else: 

631 res = c 

632 

633 if res is not Discard: 

634 yield res 

635 

636 def _transform_tree(self, tree): 

637 children = list(self._transform_children(tree.children)) 

638 return self._call_userfunc(tree, children) 

639 

640 def transform(self, tree: Tree[_Leaf_T]) -> _Return_T: 

641 #-- 

642 return self._transform_tree(tree) 

643 

644 def __mul__( 

645 self: 'Transformer[_Leaf_T, Tree[_Leaf_U]]', 

646 other: 'Union[Transformer[_Leaf_U, _Return_V], TransformerChain[_Leaf_U, _Return_V]]' 

647 ) -> 'TransformerChain[_Leaf_T, _Return_V]': 

648 #-- 

649 return TransformerChain(self, other) 

650 

651 def __default__(self, data, children, meta): 

652 #-- 

653 return Tree(data, children, meta) 

654 

655 def __default_token__(self, token): 

656 #-- 

657 return token 

658 
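# Usage sketch (hypothetical rule/terminal names, not defined in this file):
#
#     class ToPairs(Transformer):
#         def pair(self, children):        # called for each `pair` rule
#             key, value = children
#             return (key, value)
#         def ESCAPED_STRING(self, token): # called for each matching token
#             return token[1:-1]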

659 

660def merge_transformers(base_transformer=None, **transformers_to_merge): 

661 #-- 

662 if base_transformer is None: 

663 base_transformer = Transformer() 

664 for prefix, transformer in transformers_to_merge.items(): 

665 for method_name in dir(transformer): 

666 method = getattr(transformer, method_name) 

667 if not callable(method): 

668 continue 

669 if method_name.startswith("_") or method_name == "transform": 

670 continue 

671 prefixed_method = prefix + "__" + method_name 

672 if hasattr(base_transformer, prefixed_method): 

673 raise AttributeError("Cannot merge: method '%s' appears more than once" % prefixed_method) 

674 

675 setattr(base_transformer, prefixed_method, method) 

676 

677 return base_transformer 

678 

679 

680class InlineTransformer(Transformer): ## 

681 

682 def _call_userfunc(self, tree, new_children=None): 

683 ## 

684 

685 children = new_children if new_children is not None else tree.children 

686 try: 

687 f = getattr(self, tree.data) 

688 except AttributeError: 

689 return self.__default__(tree.data, children, tree.meta) 

690 else: 

691 return f(*children) 

692 

693 

694class TransformerChain(Generic[_Leaf_T, _Return_T]): 

695 

696 transformers: 'Tuple[Union[Transformer, TransformerChain], ...]' 

697 

698 def __init__(self, *transformers: 'Union[Transformer, TransformerChain]') -> None: 

699 self.transformers = transformers 

700 

701 def transform(self, tree: Tree[_Leaf_T]) -> _Return_T: 

702 for t in self.transformers: 

703 tree = t.transform(tree) 

704 return cast(_Return_T, tree) 

705 

706 def __mul__( 

707 self: 'TransformerChain[_Leaf_T, Tree[_Leaf_U]]', 

708 other: 'Union[Transformer[_Leaf_U, _Return_V], TransformerChain[_Leaf_U, _Return_V]]' 

709 ) -> 'TransformerChain[_Leaf_T, _Return_V]': 

710 return TransformerChain(*self.transformers + (other,)) 

711 

712 

713class Transformer_InPlace(Transformer): 

714 #-- 

715 def _transform_tree(self, tree): ## 

716 

717 return self._call_userfunc(tree) 

718 

719 def transform(self, tree: Tree[_Leaf_T]) -> _Return_T: 

720 for subtree in tree.iter_subtrees(): 

721 subtree.children = list(self._transform_children(subtree.children)) 

722 

723 return self._transform_tree(tree) 

724 

725 

726class Transformer_NonRecursive(Transformer): 

727 #-- 
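# (docstring stripped) Same interface as Transformer, but iterates with
# explicit stacks (children are folded before their parents), so very deep
# trees don't hit Python's recursion limit.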

728 

729 def transform(self, tree: Tree[_Leaf_T]) -> _Return_T: 

730 ## 

731 

732 rev_postfix = [] 

733 q: List[Branch[_Leaf_T]] = [tree] 

734 while q: 

735 t = q.pop() 

736 rev_postfix.append(t) 

737 if isinstance(t, Tree): 

738 q += t.children 

739 

740 ## 

741 

742 stack: List = [] 

743 for x in reversed(rev_postfix): 

744 if isinstance(x, Tree): 

745 size = len(x.children) 

746 if size: 

747 args = stack[-size:] 

748 del stack[-size:] 

749 else: 

750 args = [] 

751 

752 res = self._call_userfunc(x, args) 

753 if res is not Discard: 

754 stack.append(res) 

755 

756 elif self.__visit_tokens__ and isinstance(x, Token): 

757 res = self._call_userfunc_token(x) 

758 if res is not Discard: 

759 stack.append(res) 

760 else: 

761 stack.append(x) 

762 

763 result, = stack ## 

764 

765 ## 

766 

767 ## 

768 

769 ## 

770 

771 return cast(_Return_T, result) 

772 

773 

774class Transformer_InPlaceRecursive(Transformer): 

775 #-- 

776 def _transform_tree(self, tree): 

777 tree.children = list(self._transform_children(tree.children)) 

778 return self._call_userfunc(tree) 

779 

780 

781## 

782 

783 

784class VisitorBase: 

785 def _call_userfunc(self, tree): 

786 return getattr(self, tree.data, self.__default__)(tree) 

787 

788 def __default__(self, tree): 

789 #-- 

790 return tree 

791 

792 def __class_getitem__(cls, _): 

793 return cls 

794 

795 

796class Visitor(VisitorBase, ABC, Generic[_Leaf_T]): 

797 #-- 

798 

799 def visit(self, tree: Tree[_Leaf_T]) -> Tree[_Leaf_T]: 

800 #-- 

801 for subtree in tree.iter_subtrees(): 

802 self._call_userfunc(subtree) 

803 return tree 

804 

805 def visit_topdown(self, tree: Tree[_Leaf_T]) -> Tree[_Leaf_T]: 

806 #-- 

807 for subtree in tree.iter_subtrees_topdown(): 

808 self._call_userfunc(subtree) 

809 return tree 

810 

811 

812class Visitor_Recursive(VisitorBase, Generic[_Leaf_T]): 

813 #-- 

814 

815 def visit(self, tree: Tree[_Leaf_T]) -> Tree[_Leaf_T]: 

816 #-- 

817 for child in tree.children: 

818 if isinstance(child, Tree): 

819 self.visit(child) 

820 

821 self._call_userfunc(tree) 

822 return tree 

823 

824 def visit_topdown(self,tree: Tree[_Leaf_T]) -> Tree[_Leaf_T]: 

825 #-- 

826 self._call_userfunc(tree) 

827 

828 for child in tree.children: 

829 if isinstance(child, Tree): 

830 self.visit_topdown(child) 

831 

832 return tree 

833 

834 

835class Interpreter(_Decoratable, ABC, Generic[_Leaf_T, _Return_T]): 

836 #-- 

837 

838 def visit(self, tree: Tree[_Leaf_T]) -> _Return_T: 

839 ## 

840 

841 ## 

842 

843 ## 

844 

845 return self._visit_tree(tree) 

846 

847 def _visit_tree(self, tree: Tree[_Leaf_T]): 

848 f = getattr(self, tree.data) 

849 wrapper = getattr(f, 'visit_wrapper', None) 

850 if wrapper is not None: 

851 return f.visit_wrapper(f, tree.data, tree.children, tree.meta) 

852 else: 

853 return f(tree) 

854 

855 def visit_children(self, tree: Tree[_Leaf_T]) -> List: 

856 return [self._visit_tree(child) if isinstance(child, Tree) else child 

857 for child in tree.children] 

858 

859 def __getattr__(self, name): 

860 return self.__default__ 

861 

862 def __default__(self, tree): 

863 return self.visit_children(tree) 

864 

865 

866_InterMethod = Callable[[Type[Interpreter], _Return_T], _R] 

867 

868def visit_children_decor(func: _InterMethod) -> _InterMethod: 

869 #-- 

870 @wraps(func) 

871 def inner(cls, tree): 

872 values = cls.visit_children(tree) 

873 return func(cls, values) 

874 return inner 

875 

876## 

877 

878 

879def _apply_v_args(obj, visit_wrapper): 

880 try: 

881 _apply = obj._apply_v_args 

882 except AttributeError: 

883 return _VArgsWrapper(obj, visit_wrapper) 

884 else: 

885 return _apply(visit_wrapper) 

886 

887 

888class _VArgsWrapper: 

889 #-- 

890 base_func: Callable 

891 

892 def __init__(self, func: Callable, visit_wrapper: Callable[[Callable, str, list, Any], Any]): 

893 if isinstance(func, _VArgsWrapper): 

894 func = func.base_func 

895 ## 

896 

897 self.base_func = func ## 

898 

899 self.visit_wrapper = visit_wrapper 

900 update_wrapper(self, func) 

901 

902 def __call__(self, *args, **kwargs): 

903 return self.base_func(*args, **kwargs) 

904 

905 def __get__(self, instance, owner=None): 

906 try: 

907 g = self.base_func.__get__ 

908 except AttributeError: 

909 return self 

910 else: 

911 return _VArgsWrapper(g(instance, owner), self.visit_wrapper) 

912 

913 def __set_name__(self, owner, name): 

914 try: 

915 f = self.base_func.__set_name__ 

916 except AttributeError: 

917 return 

918 else: 

919 f(owner, name) 

920 

921 

922def _vargs_inline(f, _data, children, _meta): 

923 return f(*children) 

924def _vargs_meta_inline(f, _data, children, meta): 

925 return f(meta, *children) 

926def _vargs_meta(f, _data, children, meta): 

927 return f(meta, children) 

928def _vargs_tree(f, data, children, meta): 

929 return f(Tree(data, children, meta)) 

930 

931 

932def v_args(inline: bool = False, meta: bool = False, tree: bool = False, wrapper: Optional[Callable] = None) -> Callable[[_DECORATED], _DECORATED]: 

933 #-- 

934 if tree and (meta or inline): 

935 raise ValueError("Visitor functions cannot combine 'tree' with 'meta' or 'inline'.") 

936 

937 func = None 

938 if meta: 

939 if inline: 

940 func = _vargs_meta_inline 

941 else: 

942 func = _vargs_meta 

943 elif inline: 

944 func = _vargs_inline 

945 elif tree: 

946 func = _vargs_tree 

947 

948 if wrapper is not None: 

949 if func is not None: 

950 raise ValueError("Cannot use 'wrapper' along with 'tree', 'meta' or 'inline'.") 

951 func = wrapper 

952 

953 def _visitor_args_dec(obj): 

954 return _apply_v_args(obj, func) 

955 return _visitor_args_dec 
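# Usage sketch: @v_args(inline=True) unpacks children into positional args --
#
#     @v_args(inline=True)
#     class Calc(Transformer):   # hypothetical transformer
#         def add(self, left, right):
#             return left + right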

956 

957 

958 

959TOKEN_DEFAULT_PRIORITY = 0 

960 

961 

962class Symbol(Serialize): 

963 __slots__ = ('name',) 

964 

965 name: str 

966 is_term: ClassVar[bool] = NotImplemented 

967 

968 def __init__(self, name: str) -> None: 

969 self.name = name 

970 

971 def __eq__(self, other): 

972 assert isinstance(other, Symbol), other 

973 return self.is_term == other.is_term and self.name == other.name 

974 

975 def __ne__(self, other): 

976 return not (self == other) 

977 

978 def __hash__(self): 

979 return hash(self.name) 

980 

981 def __repr__(self): 

982 return '%s(%r)' % (type(self).__name__, self.name) 

983 

984 fullrepr = property(__repr__) 

985 

986 def renamed(self, f): 

987 return type(self)(f(self.name)) 

988 

989 

990class Terminal(Symbol): 

991 __serialize_fields__ = 'name', 'filter_out' 

992 

993 is_term: ClassVar[bool] = True 

994 

995 def __init__(self, name, filter_out=False): 

996 self.name = name 

997 self.filter_out = filter_out 

998 

999 @property 

1000 def fullrepr(self): 

1001 return '%s(%r, %r)' % (type(self).__name__, self.name, self.filter_out) 

1002 

1003 def renamed(self, f): 

1004 return type(self)(f(self.name), self.filter_out) 

1005 

1006 

1007class NonTerminal(Symbol): 

1008 __serialize_fields__ = 'name', 

1009 

1010 is_term: ClassVar[bool] = False 

1011 

1012 

1013class RuleOptions(Serialize): 

1014 __serialize_fields__ = 'keep_all_tokens', 'expand1', 'priority', 'template_source', 'empty_indices' 

1015 

1016 keep_all_tokens: bool 

1017 expand1: bool 

1018 priority: Optional[int] 

1019 template_source: Optional[str] 

1020 empty_indices: Tuple[bool, ...] 

1021 

1022 def __init__(self, keep_all_tokens: bool=False, expand1: bool=False, priority: Optional[int]=None, template_source: Optional[str]=None, empty_indices: Tuple[bool, ...]=()) -> None: 

1023 self.keep_all_tokens = keep_all_tokens 

1024 self.expand1 = expand1 

1025 self.priority = priority 

1026 self.template_source = template_source 

1027 self.empty_indices = empty_indices 

1028 

1029 def __repr__(self): 

1030 return 'RuleOptions(%r, %r, %r, %r)' % ( 

1031 self.keep_all_tokens, 

1032 self.expand1, 

1033 self.priority, 

1034 self.template_source 

1035 ) 

1036 

1037 

1038class Rule(Serialize): 

1039 #-- 

1040 __slots__ = ('origin', 'expansion', 'alias', 'options', 'order', '_hash') 

1041 

1042 __serialize_fields__ = 'origin', 'expansion', 'order', 'alias', 'options' 

1043 __serialize_namespace__ = Terminal, NonTerminal, RuleOptions 

1044 

1045 def __init__(self, origin, expansion, order=0, alias=None, options=None): 

1046 self.origin = origin 

1047 self.expansion = expansion 

1048 self.alias = alias 

1049 self.order = order 

1050 self.options = options or RuleOptions() 

1051 self._hash = hash((self.origin, tuple(self.expansion))) 

1052 

1053 def _deserialize(self): 

1054 self._hash = hash((self.origin, tuple(self.expansion))) 

1055 

1056 def __str__(self): 

1057 return '<%s : %s>' % (self.origin.name, ' '.join(x.name for x in self.expansion)) 

1058 

1059 def __repr__(self): 

1060 return 'Rule(%r, %r, %r, %r)' % (self.origin, self.expansion, self.alias, self.options) 

1061 

1062 def __hash__(self): 

1063 return self._hash 

1064 

1065 def __eq__(self, other): 

1066 if not isinstance(other, Rule): 

1067 return False 

1068 return self.origin == other.origin and self.expansion == other.expansion 

1069 

1070 

1071 

1072from copy import copy, deepcopy ## 'deepcopy' added: LexerConf.__deepcopy__ below uses it but it was not imported in this section 
from contextlib import suppress ## added: BasicLexer.lex below uses 'suppress' but it was not imported in this section 

1073 

1074 

1075class Pattern(Serialize, ABC): 

1076 

1077 value: str 

1078 flags: Collection[str] 

1079 raw: Optional[str] 

1080 type: ClassVar[str] 

1081 

1082 def __init__(self, value: str, flags: Collection[str]=(), raw: Optional[str]=None) -> None: 

1083 self.value = value 

1084 self.flags = frozenset(flags) 

1085 self.raw = raw 

1086 

1087 def __repr__(self): 

1088 return repr(self.to_regexp()) 

1089 

1090 ## 

1091 

1092 def __hash__(self): 

1093 return hash((type(self), self.value, self.flags)) 

1094 

1095 def __eq__(self, other): 

1096 return type(self) == type(other) and self.value == other.value and self.flags == other.flags 

1097 

1098 @abstractmethod 

1099 def to_regexp(self) -> str: 

1100 raise NotImplementedError() 

1101 

1102 @property 

1103 @abstractmethod 

1104 def min_width(self) -> int: 

1105 raise NotImplementedError() 

1106 

1107 @property 

1108 @abstractmethod 

1109 def max_width(self) -> int: 

1110 raise NotImplementedError() 

1111 

1112 def _get_flags(self, value): 

1113 for f in self.flags: 

1114 value = ('(?%s:%s)' % (f, value)) 

1115 return value 

1116 

1117 

1118class PatternStr(Pattern): 

1119 __serialize_fields__ = 'value', 'flags' 

1120 

1121 type: ClassVar[str] = "str" 

1122 

1123 def to_regexp(self) -> str: 

1124 return self._get_flags(re.escape(self.value)) 

1125 

1126 @property 

1127 def min_width(self) -> int: 

1128 return len(self.value) 

1129 

1130 @property 

1131 def max_width(self) -> int: 

1132 return len(self.value) 

1133 

1134 

1135class PatternRE(Pattern): 

1136 __serialize_fields__ = 'value', 'flags', '_width' 

1137 

1138 type: ClassVar[str] = "re" 

1139 

1140 def to_regexp(self) -> str: 

1141 return self._get_flags(self.value) 

1142 

1143 _width = None 

1144 def _get_width(self): 

1145 if self._width is None: 

1146 self._width = get_regexp_width(self.to_regexp()) 

1147 return self._width 

1148 

1149 @property 

1150 def min_width(self) -> int: 

1151 return self._get_width()[0] 

1152 

1153 @property 

1154 def max_width(self) -> int: 

1155 return self._get_width()[1] 

1156 

1157 

1158class TerminalDef(Serialize): 

1159 __serialize_fields__ = 'name', 'pattern', 'priority' 

1160 __serialize_namespace__ = PatternStr, PatternRE 

1161 

1162 name: str 

1163 pattern: Pattern 

1164 priority: int 

1165 

1166 def __init__(self, name: str, pattern: Pattern, priority: int=TOKEN_DEFAULT_PRIORITY) -> None: 

1167 assert isinstance(pattern, Pattern), pattern 

1168 self.name = name 

1169 self.pattern = pattern 

1170 self.priority = priority 

1171 

1172 def __repr__(self): 

1173 return '%s(%r, %r)' % (type(self).__name__, self.name, self.pattern) 

1174 

1175 def user_repr(self) -> str: 

1176 if self.name.startswith('__'): ## 

1177 

1178 return self.pattern.raw or self.name 

1179 else: 

1180 return self.name 

1181 

1182_T = TypeVar('_T', bound="Token") 

1183 

1184class Token(str): 

1185 #-- 
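# (docstring stripped) Token subclasses str, so it compares equal to plain
# strings; it additionally carries the terminal name in .type and source
# positions in start_pos/line/column/end_*.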

1186 __slots__ = ('type', 'start_pos', 'value', 'line', 'column', 'end_line', 'end_column', 'end_pos') 

1187 

1188 type: str 

1189 start_pos: int 

1190 value: Any 

1191 line: int 

1192 column: int 

1193 end_line: int 

1194 end_column: int 

1195 end_pos: int 

1196 

1197 def __new__(cls, type_, value, start_pos=None, line=None, column=None, end_line=None, end_column=None, end_pos=None): 

1198 inst = super(Token, cls).__new__(cls, value) 

1199 inst.type = type_ 

1200 inst.start_pos = start_pos 

1201 inst.value = value 

1202 inst.line = line 

1203 inst.column = column 

1204 inst.end_line = end_line 

1205 inst.end_column = end_column 

1206 inst.end_pos = end_pos 

1207 return inst 

1208 

1209 def update(self, type_: Optional[str]=None, value: Optional[Any]=None) -> 'Token': 

1210 return Token.new_borrow_pos( 

1211 type_ if type_ is not None else self.type, 

1212 value if value is not None else self.value, 

1213 self 

1214 ) 

1215 

1216 @classmethod 

1217 def new_borrow_pos(cls: Type[_T], type_: str, value: Any, borrow_t: 'Token') -> _T: 

1218 return cls(type_, value, borrow_t.start_pos, borrow_t.line, borrow_t.column, borrow_t.end_line, borrow_t.end_column, borrow_t.end_pos) 

1219 

1220 def __reduce__(self): 

1221 return (self.__class__, (self.type, self.value, self.start_pos, self.line, self.column)) 

1222 

1223 def __repr__(self): 

1224 return 'Token(%r, %r)' % (self.type, self.value) 

1225 

1226 def __deepcopy__(self, memo): 

1227 return Token(self.type, self.value, self.start_pos, self.line, self.column) 

1228 

1229 def __eq__(self, other): 

1230 if isinstance(other, Token) and self.type != other.type: 

1231 return False 

1232 

1233 return str.__eq__(self, other) 

1234 

1235 __hash__ = str.__hash__ 

1236 

1237 

1238class LineCounter: 

1239 __slots__ = 'char_pos', 'line', 'column', 'line_start_pos', 'newline_char' 

1240 

1241 def __init__(self, newline_char): 

1242 self.newline_char = newline_char 

1243 self.char_pos = 0 

1244 self.line = 1 

1245 self.column = 1 

1246 self.line_start_pos = 0 

1247 

1248 def __eq__(self, other): 

1249 if not isinstance(other, LineCounter): 

1250 return NotImplemented 

1251 

1252 return self.char_pos == other.char_pos and self.newline_char == other.newline_char 

1253 

1254 def feed(self, token: Token, test_newline=True): 

1255 #-- 

1256 if test_newline: 

1257 newlines = token.count(self.newline_char) 

1258 if newlines: 

1259 self.line += newlines 

1260 self.line_start_pos = self.char_pos + token.rindex(self.newline_char) + 1 

1261 

1262 self.char_pos += len(token) 

1263 self.column = self.char_pos - self.line_start_pos + 1 
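# e.g. feeding 'a\nbc' into a fresh counter leaves line=2, column=3, char_pos=4.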

1264 

1265 

1266class UnlessCallback: 

1267 def __init__(self, scanner): 

1268 self.scanner = scanner 

1269 

1270 def __call__(self, t): 

1271 res = self.scanner.match(t.value, 0) 

1272 if res: 

1273 _value, t.type = res 

1274 return t 

1275 

1276 

1277class CallChain: 

1278 def __init__(self, callback1, callback2, cond): 

1279 self.callback1 = callback1 

1280 self.callback2 = callback2 

1281 self.cond = cond 

1282 

1283 def __call__(self, t): 

1284 t2 = self.callback1(t) 

1285 return self.callback2(t) if self.cond(t2) else t2 

1286 

1287 

1288def _get_match(re_, regexp, s, flags): 

1289 m = re_.match(regexp, s, flags) 

1290 if m: 

1291 return m.group(0) 

1292 

1293def _create_unless(terminals, g_regex_flags, re_, use_bytes): 
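# If a literal-string terminal (e.g. "if") would also be matched by a regex
# terminal of the same priority (e.g. a NAME rule), the regex terminal gets an
# UnlessCallback that rewrites matching tokens back to the literal's type.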

1294 tokens_by_type = classify(terminals, lambda t: type(t.pattern)) 

1295 assert len(tokens_by_type) <= 2, tokens_by_type.keys() 

1296 embedded_strs = set() 

1297 callback = {} 

1298 for retok in tokens_by_type.get(PatternRE, []): 

1299 unless = [] 

1300 for strtok in tokens_by_type.get(PatternStr, []): 

1301 if strtok.priority != retok.priority: 

1302 continue 

1303 s = strtok.pattern.value 

1304 if s == _get_match(re_, retok.pattern.to_regexp(), s, g_regex_flags): 

1305 unless.append(strtok) 

1306 if strtok.pattern.flags <= retok.pattern.flags: 

1307 embedded_strs.add(strtok) 

1308 if unless: 

1309 callback[retok.name] = UnlessCallback(Scanner(unless, g_regex_flags, re_, match_whole=True, use_bytes=use_bytes)) 

1310 

1311 new_terminals = [t for t in terminals if t not in embedded_strs] 

1312 return new_terminals, callback 

1313 

1314 

1315class Scanner: 

1316 def __init__(self, terminals, g_regex_flags, re_, use_bytes, match_whole=False): 

1317 self.terminals = terminals 

1318 self.g_regex_flags = g_regex_flags 

1319 self.re_ = re_ 

1320 self.use_bytes = use_bytes 

1321 self.match_whole = match_whole 

1322 

1323 self.allowed_types = {t.name for t in self.terminals} 

1324 

1325 self._mres = self._build_mres(terminals, len(terminals)) 

1326 

1327 def _build_mres(self, terminals, max_size): 

1328 ## 

1329 

1330 ## 

1331 

1332 ## 
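# (comments stripped) Python's re module caps the number of named groups per
# pattern and has reported the overflow with an AssertionError, hence the
# halving retry in the except branch below.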

1333 

1334 postfix = '$' if self.match_whole else '' 

1335 mres = [] 

1336 while terminals: 

1337 pattern = u'|'.join(u'(?P<%s>%s)' % (t.name, t.pattern.to_regexp() + postfix) for t in terminals[:max_size]) 

1338 if self.use_bytes: 

1339 pattern = pattern.encode('latin-1') 

1340 try: 

1341 mre = self.re_.compile(pattern, self.g_regex_flags) 

1342 except AssertionError: ## 

1343 

1344 return self._build_mres(terminals, max_size//2) 

1345 

1346 mres.append((mre, {i: n for n, i in mre.groupindex.items()})) 

1347 terminals = terminals[max_size:] 

1348 return mres 

1349 

1350 def match(self, text, pos): 

1351 for mre, type_from_index in self._mres: 

1352 m = mre.match(text, pos) 

1353 if m: 

1354 return m.group(0), type_from_index[m.lastindex] 

1355 

1356 

1357def _regexp_has_newline(r: str): 

1358 #-- 

1359 return '\n' in r or '\\n' in r or '\\s' in r or '[^' in r or ('(?s' in r and '.' in r) 

1360 

1361 

1362class LexerState: 

1363 #-- 

1364 

1365 __slots__ = 'text', 'line_ctr', 'last_token' 

1366 

1367 def __init__(self, text, line_ctr=None, last_token=None): 

1368 self.text = text 

1369 self.line_ctr = line_ctr or LineCounter(b'\n' if isinstance(text, bytes) else '\n') 

1370 self.last_token = last_token 

1371 

1372 def __eq__(self, other): 

1373 if not isinstance(other, LexerState): 

1374 return NotImplemented 

1375 

1376 return self.text is other.text and self.line_ctr == other.line_ctr and self.last_token == other.last_token 

1377 

1378 def __copy__(self): 

1379 return type(self)(self.text, copy(self.line_ctr), self.last_token) 

1380 

1381 

1382class LexerThread: 

1383 #-- 

1384 

1385 def __init__(self, lexer: 'Lexer', lexer_state: LexerState): 

1386 self.lexer = lexer 

1387 self.state = lexer_state 

1388 

1389 @classmethod 

1390 def from_text(cls, lexer: 'Lexer', text: str): 

1391 return cls(lexer, LexerState(text)) 

1392 

1393 def lex(self, parser_state): 

1394 return self.lexer.lex(self.state, parser_state) 

1395 

1396 def __copy__(self): 

1397 return type(self)(self.lexer, copy(self.state)) 

1398 

1399 _Token = Token 

1400 

1401 

1402_Callback = Callable[[Token], Token] 

1403 

1404class Lexer(ABC): 

1405 #-- 

1406 @abstractmethod 

1407 def lex(self, lexer_state: LexerState, parser_state: Any) -> Iterator[Token]: 

1408 return NotImplemented 

1409 

1410 def make_lexer_state(self, text): 

1411 #-- 

1412 return LexerState(text) 

1413 

1414 

1415class BasicLexer(Lexer): 

1416 

1417 terminals: Collection[TerminalDef] 

1418 ignore_types: FrozenSet[str] 

1419 newline_types: FrozenSet[str] 

1420 user_callbacks: Dict[str, _Callback] 

1421 callback: Dict[str, _Callback] 

1422 re: ModuleType 

1423 

1424 def __init__(self, conf: 'LexerConf') -> None: 

1425 terminals = list(conf.terminals) 

1426 assert all(isinstance(t, TerminalDef) for t in terminals), terminals 

1427 

1428 self.re = conf.re_module 

1429 

1430 if not conf.skip_validation: 

1431 ## 

1432 

1433 for t in terminals: 

1434 try: 

1435 self.re.compile(t.pattern.to_regexp(), conf.g_regex_flags) 

1436 except self.re.error: 

1437 raise LexError("Cannot compile token %s: %s" % (t.name, t.pattern)) 

1438 

1439 if t.pattern.min_width == 0: 

1440 raise LexError("Lexer does not allow zero-width terminals. (%s: %s)" % (t.name, t.pattern)) 

1441 

1442 if not (set(conf.ignore) <= {t.name for t in terminals}): 

1443 raise LexError("Ignore terminals are not defined: %s" % (set(conf.ignore) - {t.name for t in terminals})) 

1444 

1445 ## 

1446 

1447 self.newline_types = frozenset(t.name for t in terminals if _regexp_has_newline(t.pattern.to_regexp())) 

1448 self.ignore_types = frozenset(conf.ignore) 

1449 

1450 terminals.sort(key=lambda x: (-x.priority, -x.pattern.max_width, -len(x.pattern.value), x.name)) 

1451 self.terminals = terminals 

1452 self.user_callbacks = conf.callbacks 

1453 self.g_regex_flags = conf.g_regex_flags 

1454 self.use_bytes = conf.use_bytes 

1455 self.terminals_by_name = conf.terminals_by_name 

1456 

1457 self._scanner = None 

1458 

1459 def _build_scanner(self): 

1460 terminals, self.callback = _create_unless(self.terminals, self.g_regex_flags, self.re, self.use_bytes) 

1461 assert all(self.callback.values()) 

1462 

1463 for type_, f in self.user_callbacks.items(): 

1464 if type_ in self.callback: 

1465 ## 

1466 

1467 self.callback[type_] = CallChain(self.callback[type_], f, lambda t: t.type == type_) 

1468 else: 

1469 self.callback[type_] = f 

1470 

1471 self._scanner = Scanner(terminals, self.g_regex_flags, self.re, self.use_bytes) 

1472 

1473 @property 

1474 def scanner(self): 

1475 if self._scanner is None: 

1476 self._build_scanner() 

1477 return self._scanner 

1478 

1479 def match(self, text, pos): 

1480 return self.scanner.match(text, pos) 

1481 

1482 def lex(self, state: LexerState, parser_state: Any) -> Iterator[Token]: 

1483 with suppress(EOFError): 

1484 while True: 

1485 yield self.next_token(state, parser_state) 

1486 

1487 def next_token(self, lex_state: LexerState, parser_state: Any=None) -> Token: 

1488 line_ctr = lex_state.line_ctr 

1489 while line_ctr.char_pos < len(lex_state.text): 

1490 res = self.match(lex_state.text, line_ctr.char_pos) 

1491 if not res: 

1492 allowed = self.scanner.allowed_types - self.ignore_types 

1493 if not allowed: 

1494 allowed = {"<END-OF-FILE>"} 

1495 raise UnexpectedCharacters(lex_state.text, line_ctr.char_pos, line_ctr.line, line_ctr.column, 

1496 allowed=allowed, token_history=lex_state.last_token and [lex_state.last_token], 

1497 state=parser_state, terminals_by_name=self.terminals_by_name) 

1498 

1499 value, type_ = res 

1500 

1501 if type_ not in self.ignore_types: 

1502 t = Token(type_, value, line_ctr.char_pos, line_ctr.line, line_ctr.column) 

1503 line_ctr.feed(value, type_ in self.newline_types) 

1504 t.end_line = line_ctr.line 

1505 t.end_column = line_ctr.column 

1506 t.end_pos = line_ctr.char_pos 

1507 if t.type in self.callback: 

1508 t = self.callback[t.type](t) 

1509 if not isinstance(t, Token): 

1510 raise LexError("Callbacks must return a token (returned %r)" % t) 

1511 lex_state.last_token = t 

1512 return t 

1513 else: 

1514 if type_ in self.callback: 

1515 t2 = Token(type_, value, line_ctr.char_pos, line_ctr.line, line_ctr.column) 

1516 self.callback[type_](t2) 

1517 line_ctr.feed(value, type_ in self.newline_types) 

1518 

1519 ## 

1520 

1521 raise EOFError(self) 

1522 

1523 

1524class ContextualLexer(Lexer): 

1525 

1526 lexers: Dict[str, BasicLexer] 

1527 root_lexer: BasicLexer 
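 # One BasicLexer is built per parser state, deduplicated by the frozenset of
 # terminals that state accepts; root_lexer knows every terminal and serves as
 # a fallback for producing better error messages.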

1528 

1529 def __init__(self, conf: 'LexerConf', states: Dict[str, Collection[str]], always_accept: Collection[str]=()) -> None: 

1530 terminals = list(conf.terminals) 

1531 terminals_by_name = conf.terminals_by_name 

1532 

1533 trad_conf = copy(conf) 

1534 trad_conf.terminals = terminals 

1535 

1536 lexer_by_tokens: Dict[FrozenSet[str], BasicLexer] = {} 

1537 self.lexers = {} 

1538 for state, accepts in states.items(): 

1539 key = frozenset(accepts) 

1540 try: 

1541 lexer = lexer_by_tokens[key] 

1542 except KeyError: 

1543 accepts = set(accepts) | set(conf.ignore) | set(always_accept) 

1544 lexer_conf = copy(trad_conf) 

1545 lexer_conf.terminals = [terminals_by_name[n] for n in accepts if n in terminals_by_name] 

1546 lexer = BasicLexer(lexer_conf) 

1547 lexer_by_tokens[key] = lexer 

1548 

1549 self.lexers[state] = lexer 

1550 

1551 assert trad_conf.terminals is terminals 

1552 self.root_lexer = BasicLexer(trad_conf) 

1553 

1554 def lex(self, lexer_state: LexerState, parser_state: Any) -> Iterator[Token]: 

1555 try: 

1556 while True: 

1557 lexer = self.lexers[parser_state.position] 

1558 yield lexer.next_token(lexer_state, parser_state) 

1559 except EOFError: 

1560 pass 

1561 except UnexpectedCharacters as e: 

1562 ## 

1563 

1564 ## 

1565 

1566 try: 

1567 last_token = lexer_state.last_token ## 

1568 

1569 token = self.root_lexer.next_token(lexer_state, parser_state) 

1570 raise UnexpectedToken(token, e.allowed, state=parser_state, token_history=[last_token], terminals_by_name=self.root_lexer.terminals_by_name) 

1571 except UnexpectedCharacters: 

1572 raise e ## 

1573 

1574 

1575 

1576 

1577_ParserArgType: 'TypeAlias' = 'Literal["earley", "lalr", "cyk", "auto"]' 

1578_LexerArgType: 'TypeAlias' = 'Union[Literal["auto", "basic", "contextual", "dynamic", "dynamic_complete"], Type[Lexer]]' 

1579_Callback = Callable[[Token], Token] 

1580 

1581class LexerConf(Serialize): 

1582 __serialize_fields__ = 'terminals', 'ignore', 'g_regex_flags', 'use_bytes', 'lexer_type' 

1583 __serialize_namespace__ = TerminalDef, 

1584 

1585 terminals: Collection[TerminalDef] 

1586 re_module: ModuleType 

1587 ignore: Collection[str] 

1588 postlex: 'Optional[PostLex]' 

1589 callbacks: Dict[str, _Callback] 

1590 g_regex_flags: int 

1591 skip_validation: bool 

1592 use_bytes: bool 

1593 lexer_type: Optional[_LexerArgType] 

1594 

1595 def __init__(self, terminals: Collection[TerminalDef], re_module: ModuleType, ignore: Collection[str]=(), postlex: 'Optional[PostLex]'=None, callbacks: Optional[Dict[str, _Callback]]=None, g_regex_flags: int=0, skip_validation: bool=False, use_bytes: bool=False): 

1596 self.terminals = terminals 

1597 self.terminals_by_name = {t.name: t for t in self.terminals} 

1598 assert len(self.terminals) == len(self.terminals_by_name) 

1599 self.ignore = ignore 

1600 self.postlex = postlex 

1601 self.callbacks = callbacks or {} 

1602 self.g_regex_flags = g_regex_flags 

1603 self.re_module = re_module 

1604 self.skip_validation = skip_validation 

1605 self.use_bytes = use_bytes 

1606 self.lexer_type = None 

1607 

1608 def _deserialize(self): 

1609 self.terminals_by_name = {t.name: t for t in self.terminals} 

1610 

1611 def __deepcopy__(self, memo=None): 

1612 return type(self)( 

1613 deepcopy(self.terminals, memo), 

1614 self.re_module, 

1615 deepcopy(self.ignore, memo), 

1616 deepcopy(self.postlex, memo), 

1617 deepcopy(self.callbacks, memo), 

1618 deepcopy(self.g_regex_flags, memo), 

1619 deepcopy(self.skip_validation, memo), 

1620 deepcopy(self.use_bytes, memo), 

1621 ) 

1622 

1623 

1624class ParserConf(Serialize): 

1625 __serialize_fields__ = 'rules', 'start', 'parser_type' 

1626 

1627 def __init__(self, rules, callbacks, start): 

1628 assert isinstance(start, list) 

1629 self.rules = rules 

1630 self.callbacks = callbacks 

1631 self.start = start 

1632 

1633 self.parser_type = None 

1634 

1635 

1636from functools import partial, wraps 

1637from itertools import repeat, product 

1638 

1639 

1640class ExpandSingleChild: 

1641 def __init__(self, node_builder): 

1642 self.node_builder = node_builder 

1643 

1644 def __call__(self, children): 

1645 if len(children) == 1: 

1646 return children[0] 

1647 else: 

1648 return self.node_builder(children) 

1649 

1650 

1651 

1652class PropagatePositions: 
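 # Wraps a node builder and copies line/column/position metadata from the
 # first and last position-carrying children onto the new node's meta.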

1653 def __init__(self, node_builder, node_filter=None): 

1654 self.node_builder = node_builder 

1655 self.node_filter = node_filter 

1656 

1657 def __call__(self, children): 

1658 res = self.node_builder(children) 

1659 

1660 if isinstance(res, Tree): 

1661 ## 

1662 

1663 ## 

1664 

1665 ## 

1666 

1667 ## 

1668 

1669 

1670 res_meta = res.meta 

1671 

1672 first_meta = self._pp_get_meta(children) 

1673 if first_meta is not None: 

1674 if not hasattr(res_meta, 'line'): 

1675 ## 

1676 

1677 res_meta.line = getattr(first_meta, 'container_line', first_meta.line) 

1678 res_meta.column = getattr(first_meta, 'container_column', first_meta.column) 

1679 res_meta.start_pos = getattr(first_meta, 'container_start_pos', first_meta.start_pos) 

1680 res_meta.empty = False 

1681 

1682 res_meta.container_line = getattr(first_meta, 'container_line', first_meta.line) 

1683 res_meta.container_column = getattr(first_meta, 'container_column', first_meta.column) 

1684 

1685 last_meta = self._pp_get_meta(reversed(children)) 

1686 if last_meta is not None: 

1687 if not hasattr(res_meta, 'end_line'): 

1688 res_meta.end_line = getattr(last_meta, 'container_end_line', last_meta.end_line) 

1689 res_meta.end_column = getattr(last_meta, 'container_end_column', last_meta.end_column) 

1690 res_meta.end_pos = getattr(last_meta, 'container_end_pos', last_meta.end_pos) 

1691 res_meta.empty = False 

1692 

1693 res_meta.container_end_line = getattr(last_meta, 'container_end_line', last_meta.end_line) 

1694 res_meta.container_end_column = getattr(last_meta, 'container_end_column', last_meta.end_column) 

1695 

1696 return res 

1697 

1698 def _pp_get_meta(self, children): 

1699 for c in children: 

1700 if self.node_filter is not None and not self.node_filter(c): 

1701 continue 

1702 if isinstance(c, Tree): 

1703 if not c.meta.empty: 

1704 return c.meta 

1705 elif isinstance(c, Token): 

1706 return c 

1707 

1708def make_propagate_positions(option): 

1709 if callable(option): 

1710 return partial(PropagatePositions, node_filter=option) 

1711 elif option is True: 

1712 return PropagatePositions 

1713 elif option is False: 

1714 return None 

1715 

1716 raise ConfigurationError('Invalid option for propagate_positions: %r' % option) 

1717 

1718 

1719class ChildFilter: 

1720 def __init__(self, to_include, append_none, node_builder): 

1721 self.node_builder = node_builder 

1722 self.to_include = to_include 

1723 self.append_none = append_none 

1724 

1725 def __call__(self, children): 

1726 filtered = [] 

1727 

1728 for i, to_expand, add_none in self.to_include: 

1729 if add_none: 

1730 filtered += [None] * add_none 

1731 if to_expand: 

1732 filtered += children[i].children 

1733 else: 

1734 filtered.append(children[i]) 

1735 

1736 if self.append_none: 

1737 filtered += [None] * self.append_none 

1738 

1739 return self.node_builder(filtered) 

1740 

1741 

1742class ChildFilterLALR(ChildFilter): 

1743 #-- 

1744 

1745 def __call__(self, children): 

1746 filtered = [] 

1747 for i, to_expand, add_none in self.to_include: 

1748 if add_none: 

1749 filtered += [None] * add_none 

1750 if to_expand: 

1751 if filtered: 

1752 filtered += children[i].children 

1753 else: ## 

1754 

1755 filtered = children[i].children 

1756 else: 

1757 filtered.append(children[i]) 

1758 

1759 if self.append_none: 

1760 filtered += [None] * self.append_none 

1761 

1762 return self.node_builder(filtered) 

1763 

1764 

1765class ChildFilterLALR_NoPlaceholders(ChildFilter): 

1766 #-- 

1767 def __init__(self, to_include, node_builder): 

1768 self.node_builder = node_builder 

1769 self.to_include = to_include 

1770 

1771 def __call__(self, children): 

1772 filtered = [] 

1773 for i, to_expand in self.to_include: 

1774 if to_expand: 

1775 if filtered: 

1776 filtered += children[i].children 

1777 else: ## 

1778 

1779 filtered = children[i].children 

1780 else: 

1781 filtered.append(children[i]) 

1782 return self.node_builder(filtered) 

1783 

1784 

1785def _should_expand(sym): 

1786 return not sym.is_term and sym.name.startswith('_') 

1787 

1788 

1789def maybe_create_child_filter(expansion, keep_all_tokens, ambiguous, _empty_indices: List[bool]): 

1790 ## 
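# (comment stripped) _empty_indices encodes where placeholder Nones go, as run
# lengths of True between kept children: e.g. [True, False, False] -> '100'
# -> [1, 0, 0] = one None before the first child, none after the others.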

1791 

1792 if _empty_indices: 

1793 assert _empty_indices.count(False) == len(expansion) 

1794 s = ''.join(str(int(b)) for b in _empty_indices) 

1795 empty_indices = [len(ones) for ones in s.split('0')] 

1796 assert len(empty_indices) == len(expansion)+1, (empty_indices, len(expansion)) 

1797 else: 

1798 empty_indices = [0] * (len(expansion)+1) 

1799 

1800 to_include = [] 

1801 nones_to_add = 0 

1802 for i, sym in enumerate(expansion): 

1803 nones_to_add += empty_indices[i] 

1804 if keep_all_tokens or not (sym.is_term and sym.filter_out): 

1805 to_include.append((i, _should_expand(sym), nones_to_add)) 

1806 nones_to_add = 0 

1807 

1808 nones_to_add += empty_indices[len(expansion)] 

1809 

1810 if _empty_indices or len(to_include) < len(expansion) or any(to_expand for i, to_expand,_ in to_include): 

1811 if _empty_indices or ambiguous: 

1812 return partial(ChildFilter if ambiguous else ChildFilterLALR, to_include, nones_to_add) 

1813 else: 

1814 ## 

1815 

1816 return partial(ChildFilterLALR_NoPlaceholders, [(i, x) for i,x,_ in to_include]) 

1817 

1818 

1819class AmbiguousExpander: 

1820 #-- 

1821 def __init__(self, to_expand, tree_class, node_builder): 

1822 self.node_builder = node_builder 

1823 self.tree_class = tree_class 

1824 self.to_expand = to_expand 

1825 

1826 def __call__(self, children): 

1827 def _is_ambig_tree(t): 

1828 return hasattr(t, 'data') and t.data == '_ambig' 

1829 

1830 ## 

1831 

1832 ## 

1833 

1834 ## 

1835 

1836 ## 

1837 

1838 ambiguous = [] 

1839 for i, child in enumerate(children): 

1840 if _is_ambig_tree(child): 

1841 if i in self.to_expand: 

1842 ambiguous.append(i) 

1843 

1844 child.expand_kids_by_data('_ambig') 

1845 

1846 if not ambiguous: 

1847 return self.node_builder(children) 

1848 

1849 expand = [iter(child.children) if i in ambiguous else repeat(child) for i, child in enumerate(children)] 

1850 return self.tree_class('_ambig', [self.node_builder(list(f[0])) for f in product(zip(*expand))]) 

1851 

1852 

1853def maybe_create_ambiguous_expander(tree_class, expansion, keep_all_tokens): 

1854 to_expand = [i for i, sym in enumerate(expansion) 

1855 if keep_all_tokens or ((not (sym.is_term and sym.filter_out)) and _should_expand(sym))] 

1856 if to_expand: 

1857 return partial(AmbiguousExpander, to_expand, tree_class) 

1858 

1859 

1860class AmbiguousIntermediateExpander: 

1861 #-- 

1862 

1863 def __init__(self, tree_class, node_builder): 

1864 self.node_builder = node_builder 

1865 self.tree_class = tree_class 

1866 

1867 def __call__(self, children): 

1868 def _is_iambig_tree(child): 

1869 return hasattr(child, 'data') and child.data == '_iambig' 

1870 

1871 def _collapse_iambig(children): 

1872 #-- 

1873 

1874 ## 

1875 

1876 ## 

1877 

1878 if children and _is_iambig_tree(children[0]): 

1879 iambig_node = children[0] 

1880 result = [] 

1881 for grandchild in iambig_node.children: 

1882 collapsed = _collapse_iambig(grandchild.children) 

1883 if collapsed: 

1884 for child in collapsed: 

1885 child.children += children[1:] 

1886 result += collapsed 

1887 else: 

1888 new_tree = self.tree_class('_inter', grandchild.children + children[1:]) 

1889 result.append(new_tree) 

1890 return result 

1891 

1892 collapsed = _collapse_iambig(children) 

1893 if collapsed: 

1894 processed_nodes = [self.node_builder(c.children) for c in collapsed] 

1895 return self.tree_class('_ambig', processed_nodes) 

1896 

1897 return self.node_builder(children) 

1898 

1899 

1900 

1901def inplace_transformer(func): 

1902 @wraps(func) 

1903 def f(children): 

1904 ## A function name in a Transformer is a rule name.

1905 

1906 tree = Tree(func.__name__, children) 

1907 return func(tree) 

1908 return f 

1909 

1910 

1911def apply_visit_wrapper(func, name, wrapper): 

1912 if wrapper is _vargs_meta or wrapper is _vargs_meta_inline: 

1913 raise NotImplementedError("Meta args not supported for internal transformer") 

1914 

1915 @wraps(func) 

1916 def f(children): 

1917 return wrapper(func, name, children, None) 

1918 return f 

1919 

1920 

1921class ParseTreeBuilder: 

1922 def __init__(self, rules, tree_class, propagate_positions=False, ambiguous=False, maybe_placeholders=False): 

1923 self.tree_class = tree_class 

1924 self.propagate_positions = propagate_positions 

1925 self.ambiguous = ambiguous 

1926 self.maybe_placeholders = maybe_placeholders 

1927 

1928 self.rule_builders = list(self._init_builders(rules)) 

1929 

1930 def _init_builders(self, rules): 

1931 propagate_positions = make_propagate_positions(self.propagate_positions) 

1932 

1933 for rule in rules: 

1934 options = rule.options 

1935 keep_all_tokens = options.keep_all_tokens 

1936 expand_single_child = options.expand1 

1937 

1938 wrapper_chain = list(filter(None, [ 

1939 (expand_single_child and not rule.alias) and ExpandSingleChild, 

1940 maybe_create_child_filter(rule.expansion, keep_all_tokens, self.ambiguous, options.empty_indices if self.maybe_placeholders else None), 

1941 propagate_positions, 

1942 self.ambiguous and maybe_create_ambiguous_expander(self.tree_class, rule.expansion, keep_all_tokens), 

1943 self.ambiguous and partial(AmbiguousIntermediateExpander, self.tree_class) 

1944 ])) 

1945 

1946 yield rule, wrapper_chain 

1947 

1948 def create_callback(self, transformer=None): 

1949 callbacks = {} 

1950 

1951 default_handler = getattr(transformer, '__default__', None) 

1952 if default_handler: 

1953 def default_callback(data, children): 

1954 return default_handler(data, children, None) 

1955 else: 

1956 default_callback = self.tree_class 

1957 

1958 for rule, wrapper_chain in self.rule_builders: 

1959 

1960 user_callback_name = rule.alias or rule.options.template_source or rule.origin.name 

1961 try: 

1962 f = getattr(transformer, user_callback_name) 

1963 wrapper = getattr(f, 'visit_wrapper', None) 

1964 if wrapper is not None: 

1965 f = apply_visit_wrapper(f, user_callback_name, wrapper) 

1966 elif isinstance(transformer, Transformer_InPlace): 

1967 f = inplace_transformer(f) 

1968 except AttributeError: 

1969 f = partial(default_callback, user_callback_name) 

1970 

1971 for w in wrapper_chain: 

1972 f = w(f) 

1973 

1974 if rule in callbacks: 

1975 raise GrammarError("Rule '%s' already exists" % (rule,)) 

1976 

1977 callbacks[rule] = f 

1978 

1979 return callbacks 

1980 

1981 
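## Illustrative sketch (assumed names, not part of the generated file): the
## returned mapping is keyed by Rule and is what ParserState.feed_token
## invokes on each Reduce:
##
##     callbacks = builder.create_callback(transformer=None)
##     subtree = callbacks[rule](children)
##
## With a transformer, rules that have a method of the same name are routed
## through that method instead of building a plain tree node; the wrapper
## chain from _init_builders (child filtering, position propagation, ...)
## is applied on top in either case.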

1982 

1983class LALR_Parser(Serialize): 

1984 def __init__(self, parser_conf, debug=False): 

1985 analysis = LALR_Analyzer(parser_conf, debug=debug) 

1986 analysis.compute_lalr() 

1987 callbacks = parser_conf.callbacks 

1988 

1989 self._parse_table = analysis.parse_table 

1990 self.parser_conf = parser_conf 

1991 self.parser = _Parser(analysis.parse_table, callbacks, debug) 

1992 

1993 @classmethod 

1994 def deserialize(cls, data, memo, callbacks, debug=False): 

1995 inst = cls.__new__(cls) 

1996 inst._parse_table = IntParseTable.deserialize(data, memo) 

1997 inst.parser = _Parser(inst._parse_table, callbacks, debug) 

1998 return inst 

1999 

2000 def serialize(self, memo): 

2001 return self._parse_table.serialize(memo) 

2002 

2003 def parse_interactive(self, lexer, start): 

2004 return self.parser.parse(lexer, start, start_interactive=True) 

2005 

2006 def parse(self, lexer, start, on_error=None): 

2007 try: 

2008 return self.parser.parse(lexer, start) 

2009 except UnexpectedInput as e: 

2010 if on_error is None: 

2011 raise 

2012 

2013 while True: 

2014 if isinstance(e, UnexpectedCharacters): 

2015 s = e.interactive_parser.lexer_thread.state 

2016 p = s.line_ctr.char_pos 

2017 

2018 if not on_error(e): 

2019 raise e 

2020 

2021 if isinstance(e, UnexpectedCharacters): 

2022 ## If the on_error callback didn't advance the character position, feed one character to avoid an infinite loop

2023 

2024 if p == s.line_ctr.char_pos: 

2025 s.line_ctr.feed(s.text[p:p+1]) 

2026 

2027 try: 

2028 return e.interactive_parser.resume_parse() 

2029 except UnexpectedToken as e2: 

2030 if (isinstance(e, UnexpectedToken) 

2031 and e.token.type == e2.token.type == '$END' 

2032 and e.interactive_parser == e2.interactive_parser): 

2033 ## Prevent an infinite loop on a repeated $END error

2034 

2035 raise e2 

2036 e = e2 

2037 except UnexpectedCharacters as e2: 

2038 e = e2 

2039 

2040 
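## Usage sketch for on_error (the handler name is an assumed example):
##
##     def keep_going(e):       # e is an UnexpectedInput subclass
##         return True          # True -> skip past the error and resume
##
##     tree = lalr_parser.parse(lexer_thread, 'start', on_error=keep_going)
##
## Returning a falsy value from the handler re-raises the original error.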

2041class ParseConf: 

2042 __slots__ = 'parse_table', 'callbacks', 'start', 'start_state', 'end_state', 'states' 

2043 

2044 def __init__(self, parse_table, callbacks, start): 

2045 self.parse_table = parse_table 

2046 

2047 self.start_state = self.parse_table.start_states[start] 

2048 self.end_state = self.parse_table.end_states[start] 

2049 self.states = self.parse_table.states 

2050 

2051 self.callbacks = callbacks 

2052 self.start = start 

2053 

2054 

2055class ParserState: 

2056 __slots__ = 'parse_conf', 'lexer', 'state_stack', 'value_stack' 

2057 

2058 def __init__(self, parse_conf, lexer, state_stack=None, value_stack=None): 

2059 self.parse_conf = parse_conf 

2060 self.lexer = lexer 

2061 self.state_stack = state_stack or [self.parse_conf.start_state] 

2062 self.value_stack = value_stack or [] 

2063 

2064 @property 

2065 def position(self): 

2066 return self.state_stack[-1] 

2067 

2068 ## Necessary for match_examples() to work

2069 

2070 def __eq__(self, other): 

2071 if not isinstance(other, ParserState): 

2072 return NotImplemented 

2073 return len(self.state_stack) == len(other.state_stack) and self.position == other.position 

2074 

2075 def __copy__(self): 

2076 return type(self)( 

2077 self.parse_conf, 

2078 self.lexer, ## XXX copy

2079 

2080 copy(self.state_stack), 

2081 deepcopy(self.value_stack), 

2082 ) 

2083 

2084 def copy(self): 

2085 return copy(self) 

2086 

2087 def feed_token(self, token, is_end=False): 

2088 state_stack = self.state_stack 

2089 value_stack = self.value_stack 

2090 states = self.parse_conf.states 

2091 end_state = self.parse_conf.end_state 

2092 callbacks = self.parse_conf.callbacks 

2093 

2094 while True: 

2095 state = state_stack[-1] 

2096 try: 

2097 action, arg = states[state][token.type] 

2098 except KeyError: 

2099 expected = {s for s in states[state].keys() if s.isupper()} 

2100 raise UnexpectedToken(token, expected, state=self, interactive_parser=None) 

2101 

2102 assert arg != end_state 

2103 

2104 if action is Shift: 

2105 ## shift once and return

2106 

2107 assert not is_end 

2108 state_stack.append(arg) 

2109 value_stack.append(token if token.type not in callbacks else callbacks[token.type](token)) 

2110 return 

2111 else: 

2112 ## reduce+shift as many times as necessary

2113 

2114 rule = arg 

2115 size = len(rule.expansion) 

2116 if size: 

2117 s = value_stack[-size:] 

2118 del state_stack[-size:] 

2119 del value_stack[-size:] 

2120 else: 

2121 s = [] 

2122 

2123 value = callbacks[rule](s) 

2124 

2125 _action, new_state = states[state_stack[-1]][rule.origin.name] 

2126 assert _action is Shift 

2127 state_stack.append(new_state) 

2128 value_stack.append(value) 

2129 

2130 if is_end and state_stack[-1] == end_state: 

2131 return value_stack[-1] 

2132 
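## Illustrative trace (not part of the generated file): for the JSON grammar
## serialized at the bottom of this file, feeding the tokens of `[1]` goes
## roughly:
##
##     feed_token(LSQB)                   # Shift
##     feed_token(SIGNED_NUMBER)          # Shift
##     feed_token(RSQB)                   # Reduce value, then Shift RSQB
##     feed_token($END, is_end=True)      # remaining Reduces; returns the result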

2133class _Parser: 

2134 def __init__(self, parse_table, callbacks, debug=False): 

2135 self.parse_table = parse_table 

2136 self.callbacks = callbacks 

2137 self.debug = debug 

2138 

2139 def parse(self, lexer, start, value_stack=None, state_stack=None, start_interactive=False): 

2140 parse_conf = ParseConf(self.parse_table, self.callbacks, start) 

2141 parser_state = ParserState(parse_conf, lexer, state_stack, value_stack) 

2142 if start_interactive: 

2143 return InteractiveParser(self, parser_state, parser_state.lexer) 

2144 return self.parse_from_state(parser_state) 

2145 

2146 

2147 def parse_from_state(self, state): 

2148 ## Main LALR-parser loop

2149 

2150 try: 

2151 token = None 

2152 for token in state.lexer.lex(state): 

2153 state.feed_token(token) 

2154 

2155 end_token = Token.new_borrow_pos('$END', '', token) if token else Token('$END', '', 0, 1, 1) 

2156 return state.feed_token(end_token, True) 

2157 except UnexpectedInput as e: 

2158 try: 

2159 e.interactive_parser = InteractiveParser(self, state, state.lexer) 

2160 except NameError: 

2161 pass 

2162 raise e 

2163 except Exception as e: 

2164 if self.debug: 

2165 print("") 

2166 print("STATE STACK DUMP") 

2167 print("----------------") 

2168 for i, s in enumerate(state.state_stack): 

2169 print('%d)' % i, s)

2170 print("") 

2171 

2172 raise 

2173 

2174 

2175class Action: 

2176 def __init__(self, name): 

2177 self.name = name 

2178 def __str__(self): 

2179 return self.name 

2180 def __repr__(self): 

2181 return str(self) 

2182 

2183Shift = Action('Shift') 

2184Reduce = Action('Reduce') 

2185 

2186 

2187class ParseTable: 

2188 def __init__(self, states, start_states, end_states): 

2189 self.states = states 

2190 self.start_states = start_states 

2191 self.end_states = end_states 

2192 

2193 def serialize(self, memo): 

2194 tokens = Enumerator() 

2195 rules = Enumerator() 

2196 

2197 states = { 

2198 state: {tokens.get(token): ((1, arg.serialize(memo)) if action is Reduce else (0, arg)) 

2199 for token, (action, arg) in actions.items()} 

2200 for state, actions in self.states.items() 

2201 } 

2202 

2203 return { 

2204 'tokens': tokens.reversed(), 

2205 'states': states, 

2206 'start_states': self.start_states, 

2207 'end_states': self.end_states, 

2208 } 

2209 

2210 @classmethod 

2211 def deserialize(cls, data, memo): 

2212 tokens = data['tokens'] 

2213 states = { 

2214 state: {tokens[token]: ((Reduce, Rule.deserialize(arg, memo)) if action==1 else (Shift, arg)) 

2215 for token, (action, arg) in actions.items()} 

2216 for state, actions in data['states'].items() 

2217 } 

2218 return cls(states, data['start_states'], data['end_states']) 

2219 

2220 

2221class IntParseTable(ParseTable): 

2222 

2223 @classmethod 

2224 def from_ParseTable(cls, parse_table): 

2225 enum = list(parse_table.states) 

2226 state_to_idx = {s:i for i,s in enumerate(enum)} 

2227 int_states = {} 

2228 

2229 for s, la in parse_table.states.items(): 

2230 la = {k:(v[0], state_to_idx[v[1]]) if v[0] is Shift else v 

2231 for k,v in la.items()} 

2232 int_states[ state_to_idx[s] ] = la 

2233 

2234 

2235 start_states = {start:state_to_idx[s] for start, s in parse_table.start_states.items()} 

2236 end_states = {start:state_to_idx[s] for start, s in parse_table.end_states.items()} 

2237 return cls(int_states, start_states, end_states) 

2238 

2239 
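## Illustrative sketch (not part of the generated file): from_ParseTable
## renames opaque state objects to small integers, e.g.
##
##     {S0: {'A': (Shift, S1)}, S1: {...}}  ->  {0: {'A': (Shift, 1)}, 1: {...}}
##
## which is the compact form stored in the DATA blob at the bottom of this file.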

2240 

2241def _wrap_lexer(lexer_class): 

2242 future_interface = getattr(lexer_class, '__future_interface__', False) 

2243 if future_interface: 

2244 return lexer_class 

2245 else: 

2246 class CustomLexerWrapper(Lexer): 

2247 def __init__(self, lexer_conf): 

2248 self.lexer = lexer_class(lexer_conf) 

2249 def lex(self, lexer_state, parser_state): 

2250 return self.lexer.lex(lexer_state.text) 

2251 return CustomLexerWrapper 

2252 

2253 
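## Sketch of a legacy custom lexer that _wrap_lexer would adapt (an assumed
## example, not part of the generated file):
##
##     class WordLexer(Lexer):
##         def __init__(self, lexer_conf):
##             pass
##         def lex(self, text):               # old interface: takes raw text
##             for word in text.split():
##                 yield Token('WORD', word)
##
## Lacking __future_interface__, it gets wrapped so that lexer_state.text is
## forwarded to this lex() signature.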

2254def _deserialize_parsing_frontend(data, memo, lexer_conf, callbacks, options): 

2255 parser_conf = ParserConf.deserialize(data['parser_conf'], memo) 

2256 cls = (options and options._plugins.get('LALR_Parser')) or LALR_Parser 

2257 parser = cls.deserialize(data['parser'], memo, callbacks, options.debug) 

2258 parser_conf.callbacks = callbacks 

2259 return ParsingFrontend(lexer_conf, parser_conf, options, parser=parser) 

2260 

2261 

2262_parser_creators: 'Dict[str, Callable[[LexerConf, Any, Any], Any]]' = {} 

2263 

2264 

2265class ParsingFrontend(Serialize): 

2266 __serialize_fields__ = 'lexer_conf', 'parser_conf', 'parser' 

2267 

2268 def __init__(self, lexer_conf, parser_conf, options, parser=None): 

2269 self.parser_conf = parser_conf 

2270 self.lexer_conf = lexer_conf 

2271 self.options = options 

2272 

2273 ## Set up the parser

2274 

2275 if parser: ## From cache

2276 

2277 self.parser = parser 

2278 else: 

2279 create_parser = _parser_creators.get(parser_conf.parser_type) 

2280 assert create_parser is not None, "{} is not supported in standalone mode".format( 

2281 parser_conf.parser_type 

2282 ) 

2283 self.parser = create_parser(lexer_conf, parser_conf, options) 

2284 

2285 ## Set up the lexer

2286 

2287 lexer_type = lexer_conf.lexer_type 

2288 self.skip_lexer = False 

2289 if lexer_type in ('dynamic', 'dynamic_complete'): 

2290 assert lexer_conf.postlex is None 

2291 self.skip_lexer = True 

2292 return 

2293 

2294 try: 

2295 create_lexer = { 

2296 'basic': create_basic_lexer, 

2297 'contextual': create_contextual_lexer, 

2298 }[lexer_type] 

2299 except KeyError: 

2300 assert issubclass(lexer_type, Lexer), lexer_type 

2301 self.lexer = _wrap_lexer(lexer_type)(lexer_conf) 

2302 else: 

2303 self.lexer = create_lexer(lexer_conf, self.parser, lexer_conf.postlex, options) 

2304 

2305 if lexer_conf.postlex: 

2306 self.lexer = PostLexConnector(self.lexer, lexer_conf.postlex) 

2307 

2308 def _verify_start(self, start=None): 

2309 if start is None: 

2310 start_decls = self.parser_conf.start 

2311 if len(start_decls) > 1: 

2312 raise ConfigurationError("Lark initialized with more than 1 possible start rule. Must specify which start rule to parse", start_decls) 

2313 start, = start_decls

2314 elif start not in self.parser_conf.start: 

2315 raise ConfigurationError("Unknown start rule %s. Must be one of %r" % (start, self.parser_conf.start)) 

2316 return start 

2317 

2318 def _make_lexer_thread(self, text): 

2319 cls = (self.options and self.options._plugins.get('LexerThread')) or LexerThread 

2320 return text if self.skip_lexer else cls.from_text(self.lexer, text) 

2321 

2322 def parse(self, text, start=None, on_error=None): 

2323 chosen_start = self._verify_start(start) 

2324 kw = {} if on_error is None else {'on_error': on_error} 

2325 stream = self._make_lexer_thread(text) 

2326 return self.parser.parse(stream, chosen_start, **kw) 

2327 

2328 def parse_interactive(self, text=None, start=None): 

2329 chosen_start = self._verify_start(start) 

2330 if self.parser_conf.parser_type != 'lalr': 

2331 raise ConfigurationError("parse_interactive() currently only works with parser='lalr'")

2332 stream = self._make_lexer_thread(text) 

2333 return self.parser.parse_interactive(stream, chosen_start) 

2334 
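## Usage sketch (assumed partial input; InteractiveParser is defined earlier
## in this file):
##
##     ip = frontend.parse_interactive('{"key": ')
##     ip.exhaust_lexer()       # feed all tokens lexed so far
##     ip.accepts()             # the set of terminal names valid at this point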

2335 

2336def _validate_frontend_args(parser, lexer) -> None: 

2337 assert_config(parser, ('lalr', 'earley', 'cyk')) 

2338 if not isinstance(lexer, type): ## not a custom lexer class?

2339 

2340 expected = { 

2341 'lalr': ('basic', 'contextual'), 

2342 'earley': ('basic', 'dynamic', 'dynamic_complete'), 

2343 'cyk': ('basic', ), 

2344 }[parser] 

2345 assert_config(lexer, expected, 'Parser %r does not support lexer %%r, expected one of %%s' % parser) 

2346 

2347 

2348def _get_lexer_callbacks(transformer, terminals): 

2349 result = {} 

2350 for terminal in terminals: 

2351 callback = getattr(transformer, terminal.name, None) 

2352 if callback is not None: 

2353 result[terminal.name] = callback 

2354 return result 

2355 

2356class PostLexConnector: 

2357 def __init__(self, lexer, postlexer): 

2358 self.lexer = lexer 

2359 self.postlexer = postlexer 

2360 

2361 def lex(self, lexer_state, parser_state): 

2362 i = self.lexer.lex(lexer_state, parser_state) 

2363 return self.postlexer.process(i) 

2364 

2365 

2366 

2367def create_basic_lexer(lexer_conf, parser, postlex, options): 

2368 cls = (options and options._plugins.get('BasicLexer')) or BasicLexer 

2369 return cls(lexer_conf) 

2370 

2371def create_contextual_lexer(lexer_conf, parser, postlex, options): 

2372 cls = (options and options._plugins.get('ContextualLexer')) or ContextualLexer 

2373 states = {idx:list(t.keys()) for idx, t in parser._parse_table.states.items()} 

2374 always_accept = postlex.always_accept if postlex else () 

2375 return cls(lexer_conf, states, always_accept=always_accept) 

2376 

2377def create_lalr_parser(lexer_conf, parser_conf, options=None): 

2378 debug = options.debug if options else False 

2379 cls = (options and options._plugins.get('LALR_Parser')) or LALR_Parser 

2380 return cls(parser_conf, debug=debug) 

2381 

2382_parser_creators['lalr'] = create_lalr_parser 

2383 

2384 

2385 

2386 

2387class PostLex(ABC): 

2388 @abstractmethod 

2389 def process(self, stream: Iterator[Token]) -> Iterator[Token]: 

2390 return stream 

2391 

2392 always_accept: Iterable[str] = () 

2393 
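## Minimal PostLex sketch (an assumed example, not part of the generated file):
##
##     class DropComments(PostLex):
##         def process(self, stream):
##             return (t for t in stream if t.type != 'COMMENT')
##
## An instance can then be passed as the postlex option, e.g.
## Lark_StandAlone(postlex=DropComments()) for a grammar with a COMMENT terminal.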

2394class LarkOptions(Serialize): 

2395 #-- Specifies the options for Lark.

2396 

2397 start: List[str] 

2398 debug: bool 

2399 transformer: 'Optional[Transformer]' 

2400 propagate_positions: Union[bool, str] 

2401 maybe_placeholders: bool 

2402 cache: Union[bool, str] 

2403 regex: bool 

2404 g_regex_flags: int 

2405 keep_all_tokens: bool 

2406 tree_class: Any 

2407 parser: _ParserArgType 

2408 lexer: _LexerArgType 

2409 ambiguity: 'Literal["auto", "resolve", "explicit", "forest"]' 

2410 postlex: Optional[PostLex] 

2411 priority: 'Optional[Literal["auto", "normal", "invert"]]' 

2412 lexer_callbacks: Dict[str, Callable[[Token], Token]] 

2413 use_bytes: bool 

2414 edit_terminals: Optional[Callable[[TerminalDef], TerminalDef]] 

2415 import_paths: 'List[Union[str, Callable[[Union[None, str, PackageResource], str], Tuple[str, str]]]]' 

2416 source_path: Optional[str] 

2417 

2418 OPTIONS_DOC = """ 

2419 **=== General Options ===** 

2420 

2421 start 

2422 The start symbol. Either a string, or a list of strings for multiple possible starts (Default: "start") 

2423 debug 

2424 Display debug information and extra warnings. Use only when debugging (Default: ``False``) 

2425 When used with Earley, it generates a forest graph as "sppf.png", if 'dot' is installed. 

2426 transformer 

2427 Applies the transformer to every parse tree (equivalent to applying it after the parse, but faster) 

2428 propagate_positions 

2429 Propagates (line, column, end_line, end_column) attributes into all tree branches. 

2430 Accepts ``False``, ``True``, or a callable, which will filter which nodes to ignore when propagating. 

2431 maybe_placeholders 

2432 When ``True``, the ``[]`` operator returns ``None`` when not matched. 

2433 When ``False``, ``[]`` behaves like the ``?`` operator, and returns no value at all. 

2434 (Default: ``True``)

2435 cache 

2436 Cache the results of the Lark grammar analysis, for 2x to 3x faster loading. LALR only for now.

2437 

2438 - When ``False``, does nothing (default) 

2439 - When ``True``, caches to a temporary file in the local directory 

2440 - When given a string, caches to the path pointed to by the string

2441 regex 

2442 When True, uses the ``regex`` module instead of the stdlib ``re``. 

2443 g_regex_flags 

2444 Flags that are applied to all terminals (both regex and strings) 

2445 keep_all_tokens 

2446 Prevent the tree builder from automagically removing "punctuation" tokens (Default: ``False``) 

2447 tree_class 

2448 Lark will produce trees made of instances of this class instead of the default ``lark.Tree``.

2449 

2450 **=== Algorithm Options ===** 

2451 

2452 parser 

2453 Decides which parser engine to use. Accepts "earley" or "lalr". (Default: "earley"). 

2454 (There is also a "cyk" option, kept for legacy support)

2455 lexer 

2456 Decides whether or not to use a lexer stage 

2457 

2458 - "auto" (default): Choose for me based on the parser 

2459 - "basic": Use a basic lexer 

2460 - "contextual": Stronger lexer (only works with parser="lalr") 

2461 - "dynamic": Flexible and powerful (only with parser="earley") 

2462 - "dynamic_complete": Same as dynamic, but tries *every* variation of tokenizing possible. 

2463 ambiguity 

2464 Decides how to handle ambiguity in the parse. Only relevant if parser="earley" 

2465 

2466 - "resolve": The parser will automatically choose the simplest derivation 

2467 (it chooses consistently: greedy for tokens, non-greedy for rules) 

2468 - "explicit": The parser will return all derivations wrapped in "_ambig" tree nodes (i.e. a forest). 

2469 - "forest": The parser will return the root of the shared packed parse forest. 

2470 

2471 **=== Misc. / Domain Specific Options ===** 

2472 

2473 postlex 

2474 Lexer post-processing (Default: ``None``). Only works with the basic and contextual lexers.

2475 priority 

2476 How priorities should be evaluated - "auto", ``None``, "normal", "invert" (Default: "auto") 

2477 lexer_callbacks 

2478 Dictionary of callbacks for the lexer. May alter tokens during lexing. Use with caution. 

2479 use_bytes 

2480 Accept an input of type ``bytes`` instead of ``str``. 

2481 edit_terminals 

2482 A callback for editing the terminals before parse. 

2483 import_paths 

2484 A list of either paths or loader functions that specify where grammars are imported from

2485 source_path 

2486 Override the source path from which the grammar was loaded. Useful for relative imports and unconventional grammar loading.

2487 **=== End of Options ===** 

2488 """ 

2489 if __doc__: 

2490 __doc__ += OPTIONS_DOC 

2491 

2492 

2493 ## Adding a new option needs to be done in multiple places:

2494 

2495 ## - In the _defaults dictionary below. This is the primary truth of which options Lark.__init__ accepts

2496 

2497 ## - In the OPTIONS_DOC docstring above, which is also used in the documentation

2498 

2499 ## - As a type annotation on LarkOptions above

2500 

2501 ## - Potentially in _LOAD_ALLOWED_OPTIONS below this class, if the option doesn't change how the grammar is loaded

2502 

2503 ## - Potentially in lark.tools.__init__, if it makes sense as a command-line argument

2504 

2505 _defaults: Dict[str, Any] = { 

2506 'debug': False, 

2507 'keep_all_tokens': False, 

2508 'tree_class': None, 

2509 'cache': False, 

2510 'postlex': None, 

2511 'parser': 'earley', 

2512 'lexer': 'auto', 

2513 'transformer': None, 

2514 'start': 'start', 

2515 'priority': 'auto', 

2516 'ambiguity': 'auto', 

2517 'regex': False, 

2518 'propagate_positions': False, 

2519 'lexer_callbacks': {}, 

2520 'maybe_placeholders': True, 

2521 'edit_terminals': None, 

2522 'g_regex_flags': 0, 

2523 'use_bytes': False, 

2524 'import_paths': [], 

2525 'source_path': None, 

2526 '_plugins': {}, 

2527 } 

2528 

2529 def __init__(self, options_dict): 

2530 o = dict(options_dict) 

2531 

2532 options = {} 

2533 for name, default in self._defaults.items(): 

2534 if name in o: 

2535 value = o.pop(name) 

2536 if isinstance(default, bool) and name not in ('cache', 'use_bytes', 'propagate_positions'): 

2537 value = bool(value) 

2538 else: 

2539 value = default 

2540 

2541 options[name] = value 

2542 

2543 if isinstance(options['start'], str): 

2544 options['start'] = [options['start']] 

2545 

2546 self.__dict__['options'] = options 

2547 

2548 

2549 assert_config(self.parser, ('earley', 'lalr', 'cyk', None)) 

2550 

2551 if self.parser == 'earley' and self.transformer: 

2552 raise ConfigurationError('Cannot specify an embedded transformer when using the Earley algorithm. ' 

2553 'Please use your transformer on the resulting parse tree, or use a different algorithm (i.e. LALR)') 

2554 

2555 if o: 

2556 raise ConfigurationError("Unknown options: %s" % o.keys()) 

2557 

2558 def __getattr__(self, name): 

2559 try: 

2560 return self.__dict__['options'][name] 

2561 except KeyError as e: 

2562 raise AttributeError(e) 

2563 

2564 def __setattr__(self, name, value): 

2565 assert_config(name, self.options.keys(), "%r isn't a valid option. Expected one of: %s") 

2566 self.options[name] = value 

2567 

2568 def serialize(self, memo): 

2569 return self.options 

2570 

2571 @classmethod 

2572 def deserialize(cls, data, memo): 

2573 return cls(data) 

2574 

2575 
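## Illustrative sketch (not part of the generated file): LarkOptions normalizes
## its keyword arguments, e.g.
##
##     opts = LarkOptions({'parser': 'lalr', 'start': 'value'})
##     opts.start    # -> ['value']  (a single start symbol is wrapped in a list)
##     opts.debug    # -> False     (unset options fall back to _defaults)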

2576## Options that can be passed to the Lark parser, even when it was loaded from cache/standalone.

2577 

2578## XXX Add a check for the rest of the options

2579 

2580_LOAD_ALLOWED_OPTIONS = {'postlex', 'transformer', 'lexer_callbacks', 'use_bytes', 'debug', 'g_regex_flags', 'regex', 'propagate_positions', 'tree_class', '_plugins'} 

2581 

2582_VALID_PRIORITY_OPTIONS = ('auto', 'normal', 'invert', None) 

2583_VALID_AMBIGUITY_OPTIONS = ('auto', 'resolve', 'explicit', 'forest') 

2584 

2585 

2586_T = TypeVar('_T', bound="Lark") 

2587 

2588class Lark(Serialize): 

2589 #-- Main interface for the library. Mostly a thin wrapper over the different parsers and the tree constructor.

2590 

2591 source_path: str 

2592 source_grammar: str 

2593 grammar: 'Grammar' 

2594 options: LarkOptions 

2595 lexer: Lexer 

2596 terminals: List[TerminalDef] 

2597 

2598 def __init__(self, grammar: 'Union[Grammar, str, IO[str]]', **options) -> None: 

2599 self.options = LarkOptions(options) 

2600 

2601 ## Set regex or re module

2602 

2603 use_regex = self.options.regex 

2604 if use_regex: 

2605 if regex: 

2606 re_module = regex 

2607 else: 

2608 raise ImportError('`regex` module must be installed if calling `Lark(regex=True)`.') 

2609 else: 

2610 re_module = re 

2611 

2612 ## Set self.source_path (file-like grammar objects may expose a .name)

2613 

2614 if self.options.source_path is None: 

2615 try: 

2616 self.source_path = grammar.name 

2617 except AttributeError: 

2618 self.source_path = '<string>' 

2619 else: 

2620 self.source_path = self.options.source_path 

2621 

2622 ## Drain file-like objects to get their contents

2623 

2624 try: 

2625 read = grammar.read 

2626 except AttributeError: 

2627 pass 

2628 else: 

2629 grammar = read() 

2630 

2631 cache_fn = None 

2632 cache_md5 = None 

2633 if isinstance(grammar, str): 

2634 self.source_grammar = grammar 

2635 if self.options.use_bytes: 

2636 if not isascii(grammar): 

2637 raise ConfigurationError("Grammar must be ascii only, when use_bytes=True") 

2638 

2639 if self.options.cache: 

2640 if self.options.parser != 'lalr': 

2641 raise ConfigurationError("cache only works with parser='lalr' for now") 

2642 

2643 unhashable = ('transformer', 'postlex', 'lexer_callbacks', 'edit_terminals', '_plugins') 

2644 options_str = ''.join(k+str(v) for k, v in options.items() if k not in unhashable) 

2645 from . import __version__ 

2646 s = grammar + options_str + __version__ + str(sys.version_info[:2]) 

2647 cache_md5 = hashlib.md5(s.encode('utf8')).hexdigest() 

2648 

2649 if isinstance(self.options.cache, str): 

2650 cache_fn = self.options.cache 

2651 else: 

2652 if self.options.cache is not True: 

2653 raise ConfigurationError("cache argument must be bool or str") 

2654 

2655 cache_fn = tempfile.gettempdir() + '/.lark_cache_%s_%s_%s.tmp' % (cache_md5, *sys.version_info[:2]) 

2656 

2657 if FS.exists(cache_fn): 

2658 logger.debug('Loading grammar from cache: %s', cache_fn) 

2659 ## Remove options that aren't relevant for loading from cache

2660 

2661 for name in (set(options) - _LOAD_ALLOWED_OPTIONS): 

2662 del options[name] 

2663 with FS.open(cache_fn, 'rb') as f: 

2664 old_options = self.options 

2665 try: 

2666 file_md5 = f.readline().rstrip(b'\n') 

2667 cached_used_files = pickle.load(f) 

2668 if file_md5 == cache_md5.encode('utf8') and verify_used_files(cached_used_files): 

2669 cached_parser_data = pickle.load(f) 

2670 self._load(cached_parser_data, **options) 

2671 return 

2672 except Exception: ## We should probably narrow down which errors to catch here

2673 

2674 logger.exception("Failed to load Lark from cache: %r. We will try to carry on." % cache_fn) 

2675 

2676 ## The Lark instance might have been altered by the failed call to _load;

2677 

2678 ## in practice only `options` should have been overwritten, so restore it

2679 

2680 self.options = old_options 

2681 

2682 

2683 ## Parse the grammar file and compose the grammars

2684 

2685 self.grammar, used_files = load_grammar(grammar, self.source_path, self.options.import_paths, self.options.keep_all_tokens) 

2686 else: 

2687 assert isinstance(grammar, Grammar) 

2688 self.grammar = grammar 

2689 

2690 

2691 if self.options.lexer == 'auto': 

2692 if self.options.parser == 'lalr': 

2693 self.options.lexer = 'contextual' 

2694 elif self.options.parser == 'earley': 

2695 if self.options.postlex is not None: 

2696 logger.info("postlex can't be used with the dynamic lexer, so we use 'basic' instead. " 

2697 "Consider using lalr with contextual instead of earley") 

2698 self.options.lexer = 'basic' 

2699 else: 

2700 self.options.lexer = 'dynamic' 

2701 elif self.options.parser == 'cyk': 

2702 self.options.lexer = 'basic' 

2703 else: 

2704 assert False, self.options.parser 

2705 lexer = self.options.lexer 

2706 if isinstance(lexer, type): 

2707 assert issubclass(lexer, Lexer) ## XXX Is this check really important? Maybe just ensure interface compliance

2708 

2709 else: 

2710 assert_config(lexer, ('basic', 'contextual', 'dynamic', 'dynamic_complete')) 

2711 if self.options.postlex is not None and 'dynamic' in lexer: 

2712 raise ConfigurationError("Can't use postlex with a dynamic lexer. Use basic or contextual instead") 

2713 

2714 if self.options.ambiguity == 'auto': 

2715 if self.options.parser == 'earley': 

2716 self.options.ambiguity = 'resolve' 

2717 else: 

2718 assert_config(self.options.parser, ('earley', 'cyk'), "%r doesn't support disambiguation. Use one of these parsers instead: %s") 

2719 

2720 if self.options.priority == 'auto': 

2721 self.options.priority = 'normal' 

2722 

2723 if self.options.priority not in _VALID_PRIORITY_OPTIONS: 

2724 raise ConfigurationError("invalid priority option: %r. Must be one of %r" % (self.options.priority, _VALID_PRIORITY_OPTIONS)) 

2725 if self.options.ambiguity not in _VALID_AMBIGUITY_OPTIONS: 

2726 raise ConfigurationError("invalid ambiguity option: %r. Must be one of %r" % (self.options.ambiguity, _VALID_AMBIGUITY_OPTIONS)) 

2727 

2728 if self.options.parser is None: 

2729 terminals_to_keep = '*' 

2730 elif self.options.postlex is not None: 

2731 terminals_to_keep = set(self.options.postlex.always_accept) 

2732 else: 

2733 terminals_to_keep = set() 

2734 

2735 ## Compile the EBNF grammar into BNF

2736 

2737 self.terminals, self.rules, self.ignore_tokens = self.grammar.compile(self.options.start, terminals_to_keep) 

2738 

2739 if self.options.edit_terminals: 

2740 for t in self.terminals: 

2741 self.options.edit_terminals(t) 

2742 

2743 self._terminals_dict = {t.name: t for t in self.terminals} 

2744 

2745 ## If the user asked to invert the priorities, negate them all here.

2746 

2747 if self.options.priority == 'invert': 

2748 for rule in self.rules: 

2749 if rule.options.priority is not None: 

2750 rule.options.priority = -rule.options.priority 

2751 for term in self.terminals: 

2752 term.priority = -term.priority 

2753 ## Else, if the user asked to disable priorities, strip them from the rules and terminals.

2754 

2755 ## This allows the Earley parsers to skip an extra forest walk for improved performance,

2756 

2757 ## if you don't need them (or didn't specify any).

2758 

2759 elif self.options.priority is None: 

2760 for rule in self.rules: 

2761 if rule.options.priority is not None: 

2762 rule.options.priority = None 

2763 for term in self.terminals: 

2764 term.priority = 0 

2765 

2766 ## TODO Deprecate lexer_callbacks?

2767 

2768 self.lexer_conf = LexerConf( 

2769 self.terminals, re_module, self.ignore_tokens, self.options.postlex, 

2770 self.options.lexer_callbacks, self.options.g_regex_flags, use_bytes=self.options.use_bytes 

2771 ) 

2772 

2773 if self.options.parser: 

2774 self.parser = self._build_parser() 

2775 elif lexer: 

2776 self.lexer = self._build_lexer() 

2777 

2778 if cache_fn: 

2779 logger.debug('Saving grammar to cache: %s', cache_fn) 

2780 with FS.open(cache_fn, 'wb') as f: 

2781 assert cache_md5 is not None 

2782 f.write(cache_md5.encode('utf8') + b'\n') 

2783 pickle.dump(used_files, f) 

2784 self.save(f, _LOAD_ALLOWED_OPTIONS) 

2785 

2786 if __doc__: 

2787 __doc__ += "\n\n" + LarkOptions.OPTIONS_DOC 

2788 

2789 __serialize_fields__ = 'parser', 'rules', 'options' 

2790 

2791 def _build_lexer(self, dont_ignore=False): 

2792 lexer_conf = self.lexer_conf 

2793 if dont_ignore: 

2794 from copy import copy 

2795 lexer_conf = copy(lexer_conf) 

2796 lexer_conf.ignore = () 

2797 return BasicLexer(lexer_conf) 

2798 

2799 def _prepare_callbacks(self): 

2800 self._callbacks = {} 

2801 ## We don't need these callbacks if we aren't building a tree

2802 

2803 if self.options.ambiguity != 'forest': 

2804 self._parse_tree_builder = ParseTreeBuilder( 

2805 self.rules, 

2806 self.options.tree_class or Tree, 

2807 self.options.propagate_positions, 

2808 self.options.parser != 'lalr' and self.options.ambiguity == 'explicit', 

2809 self.options.maybe_placeholders 

2810 ) 

2811 self._callbacks = self._parse_tree_builder.create_callback(self.options.transformer) 

2812 self._callbacks.update(_get_lexer_callbacks(self.options.transformer, self.terminals)) 

2813 

2814 def _build_parser(self): 

2815 self._prepare_callbacks() 

2816 _validate_frontend_args(self.options.parser, self.options.lexer) 

2817 parser_conf = ParserConf(self.rules, self._callbacks, self.options.start) 

2818 return _construct_parsing_frontend( 

2819 self.options.parser, 

2820 self.options.lexer, 

2821 self.lexer_conf, 

2822 parser_conf, 

2823 options=self.options 

2824 ) 

2825 

2826 def save(self, f, exclude_options: Collection[str] = ()): 

2827 #-- Saves the instance into the given file object. Useful for caching and multiprocessing.

2828 data, m = self.memo_serialize([TerminalDef, Rule]) 

2829 if exclude_options: 

2830 data["options"] = {n: v for n, v in data["options"].items() if n not in exclude_options} 

2831 pickle.dump({'data': data, 'memo': m}, f, protocol=pickle.HIGHEST_PROTOCOL) 

2832 

2833 @classmethod 

2834 def load(cls, f): 

2835 #-- Loads an instance from the given file object. Useful for caching and multiprocessing.

2836 inst = cls.__new__(cls) 

2837 return inst._load(f) 

2838 
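## Save/load round-trip sketch (the file path is an assumed example):
##
##     with open('json_parser.bin', 'wb') as f:
##         parser.save(f)
##     with open('json_parser.bin', 'rb') as f:
##         parser = Lark.load(f)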

2839 def _deserialize_lexer_conf(self, data, memo, options): 

2840 lexer_conf = LexerConf.deserialize(data['lexer_conf'], memo) 

2841 lexer_conf.callbacks = options.lexer_callbacks or {} 

2842 lexer_conf.re_module = regex if options.regex else re 

2843 lexer_conf.use_bytes = options.use_bytes 

2844 lexer_conf.g_regex_flags = options.g_regex_flags 

2845 lexer_conf.skip_validation = True 

2846 lexer_conf.postlex = options.postlex 

2847 return lexer_conf 

2848 

2849 def _load(self, f, **kwargs): 

2850 if isinstance(f, dict): 

2851 d = f 

2852 else: 

2853 d = pickle.load(f) 

2854 memo_json = d['memo'] 

2855 data = d['data'] 

2856 

2857 assert memo_json 

2858 memo = SerializeMemoizer.deserialize(memo_json, {'Rule': Rule, 'TerminalDef': TerminalDef}, {}) 

2859 options = dict(data['options']) 

2860 if (set(kwargs) - _LOAD_ALLOWED_OPTIONS) & set(LarkOptions._defaults): 

2861 raise ConfigurationError("Some options are not allowed when loading a Parser: {}" 

2862 .format(set(kwargs) - _LOAD_ALLOWED_OPTIONS)) 

2863 options.update(kwargs) 

2864 self.options = LarkOptions.deserialize(options, memo) 

2865 self.rules = [Rule.deserialize(r, memo) for r in data['rules']] 

2866 self.source_path = '<deserialized>' 

2867 _validate_frontend_args(self.options.parser, self.options.lexer) 

2868 self.lexer_conf = self._deserialize_lexer_conf(data['parser'], memo, self.options) 

2869 self.terminals = self.lexer_conf.terminals 

2870 self._prepare_callbacks() 

2871 self._terminals_dict = {t.name: t for t in self.terminals} 

2872 self.parser = _deserialize_parsing_frontend( 

2873 data['parser'], 

2874 memo, 

2875 self.lexer_conf, 

2876 self._callbacks, 

2877 self.options, ## Not all, but multiple attributes are used

2878 

2879 ) 

2880 return self 

2881 

2882 @classmethod 

2883 def _load_from_dict(cls, data, memo, **kwargs): 

2884 inst = cls.__new__(cls) 

2885 return inst._load({'data': data, 'memo': memo}, **kwargs) 

2886 

2887 @classmethod 

2888 def open(cls: Type[_T], grammar_filename: str, rel_to: Optional[str]=None, **options) -> _T: 

2889 #-- Create an instance of Lark with the grammar given by its filename. If rel_to is provided, the filename is resolved relative to it.

2890 if rel_to: 

2891 basepath = os.path.dirname(rel_to) 

2892 grammar_filename = os.path.join(basepath, grammar_filename) 

2893 with open(grammar_filename, encoding='utf8') as f: 

2894 return cls(f, **options) 

2895 

2896 @classmethod 

2897 def open_from_package(cls: Type[_T], package: str, grammar_path: str, search_paths: 'Sequence[str]'=[""], **options) -> _T: 

2898 #-- Create an instance of Lark with the grammar loaded from within the given package. This allows grammar loading from zipapps.

2899 package_loader = FromPackageLoader(package, search_paths) 

2900 full_path, text = package_loader(None, grammar_path) 

2901 options.setdefault('source_path', full_path) 

2902 options.setdefault('import_paths', []) 

2903 options['import_paths'].append(package_loader) 

2904 return cls(text, **options) 

2905 

2906 def __repr__(self): 

2907 return 'Lark(open(%r), parser=%r, lexer=%r, ...)' % (self.source_path, self.options.parser, self.options.lexer) 

2908 

2909 

2910 def lex(self, text: str, dont_ignore: bool=False) -> Iterator[Token]: 

2911 #-- Only lex (and postlex) the text, without parsing it. When dont_ignore=True, returns all tokens, even those marked for %ignore.

2912 if not hasattr(self, 'lexer') or dont_ignore: 

2913 lexer = self._build_lexer(dont_ignore) 

2914 else: 

2915 lexer = self.lexer 

2916 lexer_thread = LexerThread.from_text(lexer, text) 

2917 stream = lexer_thread.lex(None) 

2918 if self.options.postlex: 

2919 return self.options.postlex.process(stream) 

2920 return stream 

2921 
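## Usage sketch for lex() (the input is an assumed example):
##
##     for tok in parser.lex('{"a": 1}'):
##         print(tok.type, repr(tok))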

2922 def get_terminal(self, name: str) -> TerminalDef: 

2923 #-- Get information about a terminal, by name.

2924 return self._terminals_dict[name] 

2925 

2926 def parse_interactive(self, text: Optional[str]=None, start: Optional[str]=None) -> 'InteractiveParser': 

2927 #-- Start an interactive parsing session; only works when parser='lalr'.

2928 return self.parser.parse_interactive(text, start=start) 

2929 

2930 def parse(self, text: str, start: Optional[str]=None, on_error: 'Optional[Callable[[UnexpectedInput], bool]]'=None) -> 'ParseTree': 

2931 #-- Parse the given text according to the options provided, returning a complete parse tree.

2932 return self.parser.parse(text, start=start, on_error=on_error) 

2933 

2934 
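## Usage sketch: in this standalone module, parser instances come from
## Lark_StandAlone() (defined at the bottom of the file), and parse() then
## works as usual:
##
##     tree = Lark_StandAlone().parse('[true, false]')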

2935 

2936 

2937class DedentError(LarkError): 

2938 pass 

2939 

2940class Indenter(PostLex, ABC): 

2941 paren_level: int 

2942 indent_level: List[int] 

2943 

2944 def __init__(self) -> None: 

2945 self.paren_level = 0 

2946 self.indent_level = [0] 

2947 assert self.tab_len > 0 

2948 

2949 def handle_NL(self, token: Token) -> Iterator[Token]: 

2950 if self.paren_level > 0: 

2951 return 

2952 

2953 yield token 

2954 

2955 indent_str = token.rsplit('\n', 1)[1] ## Tabs and spaces

2956 

2957 indent = indent_str.count(' ') + indent_str.count('\t') * self.tab_len 

2958 

2959 if indent > self.indent_level[-1]: 

2960 self.indent_level.append(indent) 

2961 yield Token.new_borrow_pos(self.INDENT_type, indent_str, token) 

2962 else: 

2963 while indent < self.indent_level[-1]: 

2964 self.indent_level.pop() 

2965 yield Token.new_borrow_pos(self.DEDENT_type, indent_str, token) 

2966 

2967 if indent != self.indent_level[-1]: 

2968 raise DedentError('Unexpected dedent to column %s. Expected dedent to %s' % (indent, self.indent_level[-1])) 

2969 

2970 def _process(self, stream): 

2971 for token in stream: 

2972 if token.type == self.NL_type: 

2973 yield from self.handle_NL(token) 

2974 else: 

2975 yield token 

2976 

2977 if token.type in self.OPEN_PAREN_types: 

2978 self.paren_level += 1 

2979 elif token.type in self.CLOSE_PAREN_types: 

2980 self.paren_level -= 1 

2981 assert self.paren_level >= 0 

2982 

2983 while len(self.indent_level) > 1: 

2984 self.indent_level.pop() 

2985 yield Token(self.DEDENT_type, '') 

2986 

2987 assert self.indent_level == [0], self.indent_level 

2988 

2989 def process(self, stream): 

2990 self.paren_level = 0 

2991 self.indent_level = [0] 

2992 return self._process(stream) 

2993 

2994 ## XXX Hack for ContextualLexer. Maybe there's a more elegant solution?

2995 

2996 @property 

2997 def always_accept(self): 

2998 return (self.NL_type,) 

2999 

3000 @property 

3001 @abstractmethod 

3002 def NL_type(self) -> str: 

3003 raise NotImplementedError() 

3004 

3005 @property 

3006 @abstractmethod 

3007 def OPEN_PAREN_types(self) -> List[str]: 

3008 raise NotImplementedError() 

3009 

3010 @property 

3011 @abstractmethod 

3012 def CLOSE_PAREN_types(self) -> List[str]: 

3013 raise NotImplementedError() 

3014 

3015 @property 

3016 @abstractmethod 

3017 def INDENT_type(self) -> str: 

3018 raise NotImplementedError() 

3019 

3020 @property 

3021 @abstractmethod 

3022 def DEDENT_type(self) -> str: 

3023 raise NotImplementedError() 

3024 

3025 @property 

3026 @abstractmethod 

3027 def tab_len(self) -> int: 

3028 raise NotImplementedError() 

3029 

3030 

3031class PythonIndenter(Indenter): 

3032 NL_type = '_NEWLINE' 

3033 OPEN_PAREN_types = ['LPAR', 'LSQB', 'LBRACE'] 

3034 CLOSE_PAREN_types = ['RPAR', 'RSQB', 'RBRACE'] 

3035 INDENT_type = '_INDENT' 

3036 DEDENT_type = '_DEDENT' 

3037 tab_len = 8 

3038 
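## Usage sketch: an Indenter subclass is passed as a postlex stage. For a
## standalone parser whose grammar declares _NEWLINE/_INDENT/_DEDENT (the
## JSON grammar in this file does not), that would look like:
##
##     parser = Lark_StandAlone(postlex=PythonIndenter())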

3039 

3040import pickle, zlib, base64 

3041DATA = ( 

3042{'parser': {'lexer_conf': {'terminals': [{'@': 0}, {'@': 1}, {'@': 2}, {'@': 3}, {'@': 4}, {'@': 5}, {'@': 6}, {'@': 7}, {'@': 8}, {'@': 9}, {'@': 10}, {'@': 11}], 'ignore': ['WS'], 'g_regex_flags': 0, 'use_bytes': False, 'lexer_type': 'contextual', '__type__': 'LexerConf'}, 'parser_conf': {'rules': [{'@': 12}, {'@': 13}, {'@': 14}, {'@': 15}, {'@': 16}, {'@': 17}, {'@': 18}, {'@': 19}, {'@': 20}, {'@': 21}, {'@': 22}, {'@': 23}, {'@': 24}, {'@': 25}, {'@': 26}, {'@': 27}, {'@': 28}, {'@': 29}, {'@': 30}, {'@': 31}, {'@': 32}], 'start': ['start'], 'parser_type': 'lalr', '__type__': 'ParserConf'}, 'parser': {'tokens': {0: 'RSQB', 1: 'COMMA', 2: 'COLON', 3: '$END', 4: 'RBRACE', 5: 'LBRACE', 6: 'NULL', 7: 'TRUE', 8: 'SIGNED_NUMBER', 9: 'string', 10: 'LSQB', 11: 'value', 12: 'FALSE', 13: 'object', 14: 'array', 15: 'ESCAPED_STRING', 16: 'item', 17: '_items', 18: '__array_star_0', 19: 'start', 20: '___items_star_1'}, 'states': {0: {0: (1, {'@': 28}), 1: (1, {'@': 28}), 2: (1, {'@': 28}), 3: (1, {'@': 28}), 4: (1, {'@': 28})}, 1: {5: (0, 8), 6: (0, 14), 7: (0, 9), 8: (0, 19), 9: (0, 15), 10: (0, 26), 11: (0, 6), 12: (0, 10), 13: (0, 27), 14: (0, 4), 15: (0, 0)}, 2: {}, 3: {0: (1, {'@': 24}), 1: (1, {'@': 24}), 3: (1, {'@': 24}), 4: (1, {'@': 24})}, 4: {0: (1, {'@': 14}), 1: (1, {'@': 14}), 3: (1, {'@': 14}), 4: (1, {'@': 14})}, 5: {0: (1, {'@': 29}), 1: (1, {'@': 29})}, 6: {0: (1, {'@': 30}), 1: (1, {'@': 30})}, 7: {9: (0, 21), 15: (0, 0), 16: (0, 16)}, 8: {17: (0, 17), 4: (0, 3), 16: (0, 33), 15: (0, 0), 9: (0, 21)}, 9: {0: (1, {'@': 17}), 1: (1, {'@': 17}), 3: (1, {'@': 17}), 4: (1, {'@': 17})}, 10: {0: (1, {'@': 18}), 1: (1, {'@': 18}), 3: (1, {'@': 18}), 4: (1, {'@': 18})}, 11: {0: (1, {'@': 23}), 1: (1, {'@': 23}), 3: (1, {'@': 23}), 4: (1, {'@': 23})}, 12: {1: (0, 13), 4: (1, {'@': 26})}, 13: {16: (0, 23), 15: (0, 0), 9: (0, 21)}, 14: {0: (1, {'@': 19}), 1: (1, {'@': 19}), 3: (1, {'@': 19}), 4: (1, {'@': 19})}, 15: {0: (1, {'@': 15}), 1: (1, {'@': 15}), 3: (1, {'@': 15}), 4: (1, {'@': 15})}, 16: {4: (1, {'@': 31}), 1: (1, {'@': 31})}, 17: {4: (0, 11)}, 18: {18: (0, 20), 1: (0, 32), 0: (0, 22)}, 19: {0: (1, {'@': 16}), 1: (1, {'@': 16}), 3: (1, {'@': 16}), 4: (1, {'@': 16})}, 20: {0: (0, 24), 1: (0, 1)}, 21: {2: (0, 30)}, 22: {0: (1, {'@': 21}), 1: (1, {'@': 21}), 3: (1, {'@': 21}), 4: (1, {'@': 21})}, 23: {4: (1, {'@': 32}), 1: (1, {'@': 32})}, 24: {0: (1, {'@': 20}), 1: (1, {'@': 20}), 3: (1, {'@': 20}), 4: (1, {'@': 20})}, 25: {5: (0, 8), 6: (0, 14), 7: (0, 9), 8: (0, 19), 9: (0, 15), 10: (0, 26), 12: (0, 10), 19: (0, 2), 14: (0, 4), 13: (0, 27), 11: (0, 28), 15: (0, 0)}, 26: {0: (0, 31), 8: (0, 19), 9: (0, 15), 10: (0, 26), 11: (0, 18), 14: (0, 4), 5: (0, 8), 13: (0, 27), 7: (0, 9), 15: (0, 0), 6: (0, 14), 12: (0, 10)}, 27: {0: (1, {'@': 13}), 1: (1, {'@': 13}), 3: (1, {'@': 13}), 4: (1, {'@': 13})}, 28: {3: (1, {'@': 12})}, 29: {4: (1, {'@': 25}), 1: (1, {'@': 25})}, 30: {5: (0, 8), 6: (0, 14), 7: (0, 9), 8: (0, 19), 9: (0, 15), 11: (0, 29), 10: (0, 26), 12: (0, 10), 13: (0, 27), 14: (0, 4), 15: (0, 0)}, 31: {0: (1, {'@': 22}), 1: (1, {'@': 22}), 3: (1, {'@': 22}), 4: (1, {'@': 22})}, 32: {5: (0, 8), 6: (0, 14), 7: (0, 9), 8: (0, 19), 9: (0, 15), 10: (0, 26), 12: (0, 10), 13: (0, 27), 11: (0, 5), 14: (0, 4), 15: (0, 0)}, 33: {20: (0, 12), 1: (0, 7), 4: (1, {'@': 27})}}, 'start_states': {'start': 25}, 'end_states': {'start': 2}}, '__type__': 'ParsingFrontend'}, 'rules': [{'@': 12}, {'@': 13}, {'@': 14}, {'@': 15}, {'@': 16}, {'@': 17}, {'@': 18}, {'@': 19}, {'@': 20}, {'@': 21}, 
{'@': 22}, {'@': 23}, {'@': 24}, {'@': 25}, {'@': 26}, {'@': 27}, {'@': 28}, {'@': 29}, {'@': 30}, {'@': 31}, {'@': 32}], 'options': {'debug': False, 'keep_all_tokens': False, 'tree_class': None, 'cache': False, 'postlex': None, 'parser': 'lalr', 'lexer': 'contextual', 'transformer': None, 'start': ['start'], 'priority': 'normal', 'ambiguity': 'auto', 'regex': False, 'propagate_positions': True, 'lexer_callbacks': {}, 'maybe_placeholders': False, 'edit_terminals': None, 'g_regex_flags': 0, 'use_bytes': False, 'import_paths': [], 'source_path': None, '_plugins': {}}, '__type__': 'Lark'} 

3043) 

3044MEMO = ( 

3045{0: {'name': 'SIGNED_NUMBER', 'pattern': {'value': '(?:(?:\\+|\\-))?(?:(?:(?:[0-9])+(?:e|E)(?:(?:\\+|\\-))?(?:[0-9])+|(?:(?:[0-9])+\\.(?:(?:[0-9])+)?|\\.(?:[0-9])+)(?:(?:e|E)(?:(?:\\+|\\-))?(?:[0-9])+)?)|(?:[0-9])+)', 'flags': [], '_width': [1, 4294967295], '__type__': 'PatternRE'}, 'priority': 0, '__type__': 'TerminalDef'}, 1: {'name': 'ESCAPED_STRING', 'pattern': {'value': '".*?(?<!\\\\)(\\\\\\\\)*?"', 'flags': [], '_width': [2, 4294967295], '__type__': 'PatternRE'}, 'priority': 0, '__type__': 'TerminalDef'}, 2: {'name': 'WS', 'pattern': {'value': '(?:[ \t\x0c\r\n])+', 'flags': [], '_width': [1, 4294967295], '__type__': 'PatternRE'}, 'priority': 0, '__type__': 'TerminalDef'}, 3: {'name': 'COLON', 'pattern': {'value': ':', 'flags': [], '__type__': 'PatternStr'}, 'priority': 0, '__type__': 'TerminalDef'}, 4: {'name': 'TRUE', 'pattern': {'value': 'true', 'flags': [], '__type__': 'PatternStr'}, 'priority': 0, '__type__': 'TerminalDef'}, 5: {'name': 'FALSE', 'pattern': {'value': 'false', 'flags': [], '__type__': 'PatternStr'}, 'priority': 0, '__type__': 'TerminalDef'}, 6: {'name': 'NULL', 'pattern': {'value': 'null', 'flags': [], '__type__': 'PatternStr'}, 'priority': 0, '__type__': 'TerminalDef'}, 7: {'name': 'COMMA', 'pattern': {'value': ',', 'flags': [], '__type__': 'PatternStr'}, 'priority': 0, '__type__': 'TerminalDef'}, 8: {'name': 'LSQB', 'pattern': {'value': '[', 'flags': [], '__type__': 'PatternStr'}, 'priority': 0, '__type__': 'TerminalDef'}, 9: {'name': 'RSQB', 'pattern': {'value': ']', 'flags': [], '__type__': 'PatternStr'}, 'priority': 0, '__type__': 'TerminalDef'}, 10: {'name': 'LBRACE', 'pattern': {'value': '{', 'flags': [], '__type__': 'PatternStr'}, 'priority': 0, '__type__': 'TerminalDef'}, 11: {'name': 'RBRACE', 'pattern': {'value': '}', 'flags': [], '__type__': 'PatternStr'}, 'priority': 0, '__type__': 'TerminalDef'}, 12: {'origin': {'name': Token('RULE', 'start'), '__type__': 'NonTerminal'}, 'expansion': [{'name': 'value', '__type__': 'NonTerminal'}], 'order': 0, 'alias': None, 'options': {'keep_all_tokens': False, 'expand1': True, 'priority': None, 'template_source': None, 'empty_indices': (), '__type__': 'RuleOptions'}, '__type__': 'Rule'}, 13: {'origin': {'name': Token('RULE', 'value'), '__type__': 'NonTerminal'}, 'expansion': [{'name': 'object', '__type__': 'NonTerminal'}], 'order': 0, 'alias': None, 'options': {'keep_all_tokens': False, 'expand1': True, 'priority': None, 'template_source': None, 'empty_indices': (), '__type__': 'RuleOptions'}, '__type__': 'Rule'}, 14: {'origin': {'name': Token('RULE', 'value'), '__type__': 'NonTerminal'}, 'expansion': [{'name': 'array', '__type__': 'NonTerminal'}], 'order': 1, 'alias': None, 'options': {'keep_all_tokens': False, 'expand1': True, 'priority': None, 'template_source': None, 'empty_indices': (), '__type__': 'RuleOptions'}, '__type__': 'Rule'}, 15: {'origin': {'name': Token('RULE', 'value'), '__type__': 'NonTerminal'}, 'expansion': [{'name': 'string', '__type__': 'NonTerminal'}], 'order': 2, 'alias': None, 'options': {'keep_all_tokens': False, 'expand1': True, 'priority': None, 'template_source': None, 'empty_indices': (), '__type__': 'RuleOptions'}, '__type__': 'Rule'}, 16: {'origin': {'name': Token('RULE', 'value'), '__type__': 'NonTerminal'}, 'expansion': [{'name': 'SIGNED_NUMBER', 'filter_out': False, '__type__': 'Terminal'}], 'order': 3, 'alias': None, 'options': {'keep_all_tokens': False, 'expand1': True, 'priority': None, 'template_source': None, 'empty_indices': (), '__type__': 'RuleOptions'}, '__type__': 
'Rule'}, 17: {'origin': {'name': Token('RULE', 'value'), '__type__': 'NonTerminal'}, 'expansion': [{'name': 'TRUE', 'filter_out': True, '__type__': 'Terminal'}], 'order': 4, 'alias': None, 'options': {'keep_all_tokens': False, 'expand1': True, 'priority': None, 'template_source': None, 'empty_indices': (), '__type__': 'RuleOptions'}, '__type__': 'Rule'}, 18: {'origin': {'name': Token('RULE', 'value'), '__type__': 'NonTerminal'}, 'expansion': [{'name': 'FALSE', 'filter_out': True, '__type__': 'Terminal'}], 'order': 5, 'alias': None, 'options': {'keep_all_tokens': False, 'expand1': True, 'priority': None, 'template_source': None, 'empty_indices': (), '__type__': 'RuleOptions'}, '__type__': 'Rule'}, 19: {'origin': {'name': Token('RULE', 'value'), '__type__': 'NonTerminal'}, 'expansion': [{'name': 'NULL', 'filter_out': True, '__type__': 'Terminal'}], 'order': 6, 'alias': None, 'options': {'keep_all_tokens': False, 'expand1': True, 'priority': None, 'template_source': None, 'empty_indices': (), '__type__': 'RuleOptions'}, '__type__': 'Rule'}, 20: {'origin': {'name': Token('RULE', 'array'), '__type__': 'NonTerminal'}, 'expansion': [{'name': 'LSQB', 'filter_out': True, '__type__': 'Terminal'}, {'name': 'value', '__type__': 'NonTerminal'}, {'name': '__array_star_0', '__type__': 'NonTerminal'}, {'name': 'RSQB', 'filter_out': True, '__type__': 'Terminal'}], 'order': 0, 'alias': None, 'options': {'keep_all_tokens': False, 'expand1': False, 'priority': None, 'template_source': None, 'empty_indices': (), '__type__': 'RuleOptions'}, '__type__': 'Rule'}, 21: {'origin': {'name': Token('RULE', 'array'), '__type__': 'NonTerminal'}, 'expansion': [{'name': 'LSQB', 'filter_out': True, '__type__': 'Terminal'}, {'name': 'value', '__type__': 'NonTerminal'}, {'name': 'RSQB', 'filter_out': True, '__type__': 'Terminal'}], 'order': 1, 'alias': None, 'options': {'keep_all_tokens': False, 'expand1': False, 'priority': None, 'template_source': None, 'empty_indices': (), '__type__': 'RuleOptions'}, '__type__': 'Rule'}, 22: {'origin': {'name': Token('RULE', 'array'), '__type__': 'NonTerminal'}, 'expansion': [{'name': 'LSQB', 'filter_out': True, '__type__': 'Terminal'}, {'name': 'RSQB', 'filter_out': True, '__type__': 'Terminal'}], 'order': 2, 'alias': None, 'options': {'keep_all_tokens': False, 'expand1': False, 'priority': None, 'template_source': None, 'empty_indices': [False, True, False], '__type__': 'RuleOptions'}, '__type__': 'Rule'}, 23: {'origin': {'name': Token('RULE', 'object'), '__type__': 'NonTerminal'}, 'expansion': [{'name': 'LBRACE', 'filter_out': True, '__type__': 'Terminal'}, {'name': '_items', '__type__': 'NonTerminal'}, {'name': 'RBRACE', 'filter_out': True, '__type__': 'Terminal'}], 'order': 0, 'alias': None, 'options': {'keep_all_tokens': False, 'expand1': False, 'priority': None, 'template_source': None, 'empty_indices': (), '__type__': 'RuleOptions'}, '__type__': 'Rule'}, 24: {'origin': {'name': Token('RULE', 'object'), '__type__': 'NonTerminal'}, 'expansion': [{'name': 'LBRACE', 'filter_out': True, '__type__': 'Terminal'}, {'name': 'RBRACE', 'filter_out': True, '__type__': 'Terminal'}], 'order': 1, 'alias': None, 'options': {'keep_all_tokens': False, 'expand1': False, 'priority': None, 'template_source': None, 'empty_indices': (), '__type__': 'RuleOptions'}, '__type__': 'Rule'}, 25: {'origin': {'name': Token('RULE', 'item'), '__type__': 'NonTerminal'}, 'expansion': [{'name': 'string', '__type__': 'NonTerminal'}, {'name': 'COLON', 'filter_out': False, '__type__': 'Terminal'}, {'name': 'value', 
'__type__': 'NonTerminal'}], 'order': 0, 'alias': None, 'options': {'keep_all_tokens': False, 'expand1': False, 'priority': None, 'template_source': None, 'empty_indices': (), '__type__': 'RuleOptions'}, '__type__': 'Rule'}, 26: {'origin': {'name': Token('RULE', '_items'), '__type__': 'NonTerminal'}, 'expansion': [{'name': 'item', '__type__': 'NonTerminal'}, {'name': '___items_star_1', '__type__': 'NonTerminal'}], 'order': 0, 'alias': None, 'options': {'keep_all_tokens': False, 'expand1': False, 'priority': None, 'template_source': None, 'empty_indices': (), '__type__': 'RuleOptions'}, '__type__': 'Rule'}, 27: {'origin': {'name': Token('RULE', '_items'), '__type__': 'NonTerminal'}, 'expansion': [{'name': 'item', '__type__': 'NonTerminal'}], 'order': 1, 'alias': None, 'options': {'keep_all_tokens': False, 'expand1': False, 'priority': None, 'template_source': None, 'empty_indices': (), '__type__': 'RuleOptions'}, '__type__': 'Rule'}, 28: {'origin': {'name': Token('RULE', 'string'), '__type__': 'NonTerminal'}, 'expansion': [{'name': 'ESCAPED_STRING', 'filter_out': False, '__type__': 'Terminal'}], 'order': 0, 'alias': None, 'options': {'keep_all_tokens': False, 'expand1': False, 'priority': None, 'template_source': None, 'empty_indices': (), '__type__': 'RuleOptions'}, '__type__': 'Rule'}, 29: {'origin': {'name': '__array_star_0', '__type__': 'NonTerminal'}, 'expansion': [{'name': 'COMMA', 'filter_out': True, '__type__': 'Terminal'}, {'name': 'value', '__type__': 'NonTerminal'}], 'order': 0, 'alias': None, 'options': {'keep_all_tokens': False, 'expand1': False, 'priority': None, 'template_source': None, 'empty_indices': (), '__type__': 'RuleOptions'}, '__type__': 'Rule'}, 30: {'origin': {'name': '__array_star_0', '__type__': 'NonTerminal'}, 'expansion': [{'name': '__array_star_0', '__type__': 'NonTerminal'}, {'name': 'COMMA', 'filter_out': True, '__type__': 'Terminal'}, {'name': 'value', '__type__': 'NonTerminal'}], 'order': 1, 'alias': None, 'options': {'keep_all_tokens': False, 'expand1': False, 'priority': None, 'template_source': None, 'empty_indices': (), '__type__': 'RuleOptions'}, '__type__': 'Rule'}, 31: {'origin': {'name': '___items_star_1', '__type__': 'NonTerminal'}, 'expansion': [{'name': 'COMMA', 'filter_out': True, '__type__': 'Terminal'}, {'name': 'item', '__type__': 'NonTerminal'}], 'order': 0, 'alias': None, 'options': {'keep_all_tokens': False, 'expand1': False, 'priority': None, 'template_source': None, 'empty_indices': (), '__type__': 'RuleOptions'}, '__type__': 'Rule'}, 32: {'origin': {'name': '___items_star_1', '__type__': 'NonTerminal'}, 'expansion': [{'name': '___items_star_1', '__type__': 'NonTerminal'}, {'name': 'COMMA', 'filter_out': True, '__type__': 'Terminal'}, {'name': 'item', '__type__': 'NonTerminal'}], 'order': 1, 'alias': None, 'options': {'keep_all_tokens': False, 'expand1': False, 'priority': None, 'template_source': None, 'empty_indices': (), '__type__': 'RuleOptions'}, '__type__': 'Rule'}} 

3046) 

3047Shift = 0 

3048Reduce = 1 

3049def Lark_StandAlone(**kwargs): 

3050 return Lark._load_from_dict(DATA, MEMO, **kwargs)
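
## Demo of the generated JSON parser: a minimal sketch, safe to run directly
## and a no-op on import.
if __name__ == '__main__':
    _parser = Lark_StandAlone()
    _tree = _parser.parse('{"numbers": [1, 2.5, null], "ok": true}')
    print(_tree.pretty())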