
Comparing changes

base repository: m-labs/pythonparser
base: 8861f5db83d7
head repository: m-labs/pythonparser
compare: da78a09bc20e

  • 4 commits
  • 4 files changed
  • 1 contributor

Commits on May 3, 2015

  1. Correctly use sys.version_info.

    whitequark committed May 3, 2015
e607e7e
  2. Update ast.shim for Python 3.

    whitequark committed May 3, 2015
59a1fb5
  3. Python 3 compatibility.

    whitequark committed May 3, 2015
02dedc5
  4. Python 3 iterator compatibility.

    whitequark committed May 3, 2015
da78a09
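
A note on the first commit: it replaces a comparison against sys.version with sys.version_info. sys.version is a human-readable string (e.g. '3.4.2 (default, ...)'), so comparing it to a tuple raises TypeError on Python 3, and on Python 2 it silently falls back to cross-type ordering rather than comparing versions. sys.version_info is a tuple of version components and compares element-wise. A minimal illustration (not part of this diff):

import sys

# sys.version is a free-form string; comparing it to a tuple is broken:
# TypeError on Python 3, meaningless type-based ordering on Python 2.
# sys.version >= (3,)

# sys.version_info is (major, minor, micro, ...) and compares element-wise.
if sys.version_info >= (3,):
    print("Python 3 or newer")
else:
    print("Python 2")
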
Showing with 39 additions and 22 deletions.
  1. +1 −1 pyparser/coverage/__init__.py
  2. +19 −18 pyparser/parser.py
  3. +14 −1 pyparser/shim/ast.py
  4. +5 −2 pyparser/test/test_parser.py
2 changes: 1 addition & 1 deletion pyparser/coverage/__init__.py
@@ -61,7 +61,7 @@ def report(parser, name='parser'):
total_covered = 0
for rule in parser._all_rules:
pts = len(rule.covered)
- covered = len(filter(lambda x: x, rule.covered))
+ covered = len(list(filter(lambda x: x, rule.covered)))
if covered == 0:
klass, hint = 'uncovered', None
elif covered < pts:
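
The change above is the standard Python 3 porting fix for filter(): on Python 2 filter() returns a list, but on Python 3 it returns a lazy iterator with no len(), so the result has to be materialized with list() before it can be counted. A small sketch of the difference (illustrative, not taken from the repository):

flags = [True, False, True, True]

hits = filter(bool, flags)
# len(hits)  # works on Python 2, TypeError on Python 3 ('filter' object has no len())

hits = list(filter(bool, flags))
print(len(hits))  # 3 on both Python 2 and Python 3
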
37 changes: 19 additions & 18 deletions pyparser/parser.py
@@ -3,6 +3,7 @@
"""

from __future__ import absolute_import, division, print_function, unicode_literals
+ from functools import reduce
from . import source, diagnostic, lexer, ast

# Generic LL parsing combinators
@@ -114,7 +115,7 @@ def Seq(first_rule, *rest_of_rules, **kwargs):
A rule that accepts a sequence of tokens satisfying ``rules`` and returns a tuple
containing their return values, or None if the first rule was not satisfied.
"""
- rest_of_rules = map(Expect, rest_of_rules)
+ rest_of_rules = list(map(Expect, rest_of_rules))
@llrule(kwargs.get('loc', None), first_rule.expected)
def rule(parser):
first_result = first_rule(parser)
@@ -350,8 +351,8 @@ def decorator(at_loc, dotted_name, call_opt, newline_loc):
@action(Seq(Rule('decorators'), Alt(Rule('classdef'), Rule('funcdef'))))
def decorated(self, decorators, classfuncdef):
"""decorated: decorators (classdef | funcdef)"""
- classfuncdef.at_locs = map(lambda x: x[0], decorators)
- classfuncdef.decorator_list = map(lambda x: x[0], decorators)
+ classfuncdef.at_locs = list(map(lambda x: x[0], decorators))
+ classfuncdef.decorator_list = list(map(lambda x: x[0], decorators))
return classfuncdef

@action(Seq(Loc('def'), Tok('ident'), Rule('parameters'), Loc(':'), Rule('suite')))
@@ -413,8 +414,8 @@ def expr_stmt_1(self, augassign, rhs_expr):
@action(Star(Seq(Loc('='), Alt(Rule('yield_expr'), Rule('testlist')))))
def expr_stmt_2(self, seq):
if len(seq) > 0:
- return ast.Assign(targets=map(lambda x: x[1], seq[:-1]), value=seq[-1][1],
- op_locs=map(lambda x: x[0], seq))
+ return ast.Assign(targets=list(map(lambda x: x[1], seq[:-1])), value=seq[-1][1],
+ op_locs=list(map(lambda x: x[0], seq)))
else:
return None

@@ -432,7 +433,7 @@ def expr_stmt(self, lhs, rhs):
rhs.loc = rhs.target.loc.join(rhs.value.loc)
return rhs
elif rhs is not None:
- rhs.targets = map(self._assignable, [lhs] + rhs.targets)
+ rhs.targets = list(map(self._assignable, [lhs] + rhs.targets))
rhs.loc = lhs.loc.join(rhs.value.loc)
return rhs
else:
@@ -468,7 +469,7 @@ def print_stmt(self, print_loc, stmt):
@action(Seq(Loc('del'), Rule('exprlist')))
def del_stmt(self, stmt_loc, exprs):
"""del_stmt: 'del' exprlist"""
- return ast.Delete(targets=map(self._assignable, exprs),
+ return ast.Delete(targets=list(map(self._assignable, exprs)),
loc=stmt_loc.join(exprs.loc), keyword_loc=stmt_loc)

@action(Loc('pass'))
@@ -580,12 +581,12 @@ def dotted_as_name(self, dotted_name, as_name_opt):
def dotted_name(self, idents):
"""dotted_name: NAME ('.' NAME)*"""
return idents[0].loc.join(idents[-1].loc), \
- '.'.join(map(lambda x: x.value, idents))
+ '.'.join(list(map(lambda x: x.value, idents)))

@action(Seq(Loc('global'), List(Tok('ident'), ',', trailing=False)))
def global_stmt(self, keyword_loc, names):
"""global_stmt: 'global' NAME (',' NAME)*"""
- return ast.Global(names=map(ast.Name, names),
+ return ast.Global(names=list(map(ast.Name, names)),
keyword_loc=keyword_loc)

@action(Seq(Loc('exec'), Rule('expr'),
@@ -759,9 +760,9 @@ def or_test(self, lhs, rhs):
"""or_test: and_test ('or' and_test)*"""
if len(rhs) > 0:
return ast.BoolOp(op=ast.Or(),
- values=[lhs] + map(lambda x: x[1], rhs),
+ values=[lhs] + list(map(lambda x: x[1], rhs)),
loc=lhs.loc.join(rhs[-1][1].loc),
- op_locs=map(lambda x: x[0], rhs))
+ op_locs=list(map(lambda x: x[0], rhs)))
else:
return lhs

@@ -770,9 +771,9 @@ def and_test(self, lhs, rhs):
"""and_test: not_test ('and' not_test)*"""
if len(rhs) > 0:
return ast.BoolOp(op=ast.And(),
- values=[lhs] + map(lambda x: x[1], rhs),
+ values=[lhs] + list(map(lambda x: x[1], rhs)),
loc=lhs.loc.join(rhs[-1][1].loc),
- op_locs=map(lambda x: x[0], rhs))
+ op_locs=list(map(lambda x: x[0], rhs)))
else:
return lhs

@@ -788,8 +789,8 @@ def not_test_1(self, op, operand):
def comparison(self, lhs, rhs):
"""comparison: expr (comp_op expr)*"""
if len(rhs) > 0:
- return ast.Compare(left=lhs, ops=map(lambda x: x[0], rhs),
- comparators=map(lambda x: x[1], rhs),
+ return ast.Compare(left=lhs, ops=list(map(lambda x: x[0], rhs)),
+ comparators=list(map(lambda x: x[1], rhs)),
loc=lhs.loc.join(rhs[-1][1].loc))
else:
return lhs
@@ -1003,9 +1004,9 @@ def testlist(self, exprs):
@action(List(Seq(Rule('test'), Loc(':'), Rule('test')), ',', trailing=True))
def dictmaker(self, elts):
"""dictmaker: test ':' test (',' test ':' test)* [',']"""
- return ast.Dict(keys=map(lambda x: x[0], elts),
- values=map(lambda x: x[2], elts),
- colon_locs=map(lambda x: x[1], elts))
+ return ast.Dict(keys=list(map(lambda x: x[0], elts)),
+ values=list(map(lambda x: x[2], elts)),
+ colon_locs=list(map(lambda x: x[1], elts)))

@action(Seq(Loc('class'), Tok('ident'),
Opt(BeginEnd('(', Rule('testlist'), ')')),
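
The parser.py changes apply the same pattern to map(): on Python 3 it returns a one-shot iterator, so values that are stored on AST nodes, sliced, or iterated more than once must be converted with list(). The added "from functools import reduce" is needed because the reduce builtin was removed in Python 3; it has been available in functools since Python 2.6, so the import works on both interpreters. A short illustration of the iterator pitfall (not from the diff):

from functools import reduce  # no longer a builtin on Python 3

pairs = [('a', 1), ('b', 2)]

locs = map(lambda x: x[0], pairs)
print(list(locs))  # ['a', 'b']
print(list(locs))  # [] on Python 3: the iterator is already exhausted

locs = list(map(lambda x: x[0], pairs))
print(locs[0], len(locs))  # a 2 -- a real list can be indexed and reused

print(reduce(lambda a, b: a + b, [1, 2, 3]))  # 6 on both versions
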
15 changes: 14 additions & 1 deletion pyparser/shim/ast.py
@@ -7,7 +7,7 @@
from ast import *
import sys

- if sys.version <= (2, 6):
+ if sys.version_info <= (2, 6):
class DictComp(expr):
_fields = ('key', 'value', 'generators')

@@ -16,3 +16,16 @@ class Set(expr):

class SetComp(expr):
_fields = ('elt', 'generators')

+ if sys.version_info >= (3,):
+     class Repr(expr):
+         _fields = ('value',)
+
+     class Exec(expr):
+         _fields = ('body', 'globals', 'locals')
+
+     class Print(expr):
+         _fields = ('dest', 'values', 'nl')
+
+     class TryExcept(stmt):
+         _fields = ('body', 'handlers', 'orelse')
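
The shim module re-exports the host interpreter's ast module and backfills node classes it lacks: comprehension nodes on very old Python 2, and, with this change, the node types that only exist in the Python 2 grammar (Repr, Exec, Print, TryExcept) on Python 3, so parser code can construct them on either version. A condensed sketch of the idea, assuming client code only needs the class and its _fields (node names follow the diff; the usage at the end is illustrative):

import sys
from ast import *  # bring in everything the host ast module defines, including expr/stmt

if sys.version_info >= (3,):
    # Python 3's ast module has no Print node, so provide a minimal stand-in.
    class Print(expr):
        _fields = ('dest', 'values', 'nl')

# Code can now build a Print node even on a Python 3 interpreter:
node = Print(dest=None, values=[], nl=True)
print(node._fields)  # ('dest', 'values', 'nl')
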
7 changes: 5 additions & 2 deletions pyparser/test/test_parser.py
@@ -3,7 +3,10 @@
from __future__ import absolute_import, division, print_function, unicode_literals
from .. import source, lexer, diagnostic, ast, coverage
from ..coverage import parser
- import unittest, re
+ import unittest, sys, re

+ if sys.version_info >= (3,):
+     def unicode(x): return x

def tearDownModule():
coverage.report(parser)
@@ -41,7 +44,7 @@ def flatten_ast(self, node):
if isinstance(value, ast.AST):
value = self.flatten_ast(value)
if isinstance(value, list) and len(value) > 0 and isinstance(value[0], ast.AST):
- value = map(self.flatten_ast, value)
+ value = list(map(self.flatten_ast, value))
flat_node[unicode(field)] = value
return flat_node
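
Besides the usual list(map(...)) wrapper in flatten_ast, the test change adds a small compatibility shim of its own: Python 3 has no unicode builtin (every str is already unicode), so when running on Python 3 the tests define unicode as an identity function and the existing unicode(field) call keeps working unchanged. Roughly (illustrative):

import sys

if sys.version_info >= (3,):
    # On Python 3 every str is already unicode, so a no-op is enough here.
    def unicode(x): return x

print(unicode("targets"))  # Python 2: converts to a unicode string; Python 3: returns it as-is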