Skip to content
Permalink

Comparing changes

Choose two branches to see what’s changed or to start a new pull request. If you need to, you can also compare across forks, or learn more about diff comparisons.

Open a pull request

Create a new pull request by comparing changes across two branches. If you need to, you can also compare across forks. Learn more about diff comparisons here.
base repository: m-labs/pythonparser
Failed to load repositories. Confirm that selected base ref is valid, then try again.
Loading
base: 8eddbb17559c
Choose a base ref
...
head repository: m-labs/pythonparser
Failed to load repositories. Confirm that selected head ref is valid, then try again.
Loading
compare: b4d565374a25
Choose a head ref
  • 3 commits
  • 7 files changed
  • 1 contributor

Commits on May 9, 2015

  1. Improve docs on pyparser.parse.

    whitequark committed May 9, 2015
    Copy the full SHA
    7e7358a View commit details
  2. Don't tie ast to Python's builtin ast.

    whitequark committed May 9, 2015
    Copy the full SHA
    4f53372 View commit details
  3. Add Python 2.7 support.

    whitequark committed May 9, 2015
    Copy the full SHA
    b4d5653 View commit details
Showing with 475 additions and 245 deletions.
  1. +1 −1 doc/index.rst
  2. +6 −3 pyparser/__init__.py
  3. +208 −116 pyparser/ast.py
  4. +138 −43 pyparser/parser.py
  5. 0 pyparser/shim/__init__.py
  6. +0 −31 pyparser/shim/ast.py
  7. +122 −51 pyparser/test/test_parser.py
2 changes: 1 addition & 1 deletion doc/index.rst
Original file line number Diff line number Diff line change
@@ -46,7 +46,7 @@ for every token.
expr, BinOp, BoolOp, Call, Compare, Dict, DictComp, GeneratorExp, IfExp, Lambda,
List, ListComp, Name, Num, Repr, Set, SetComp, Str, Subscript, Tuple, UnaryOp, Yield,
keyword,
mod, Expression, Interactive, Module, Suite,
mod, Expression, Interactive, Module,
operator, Add, BitAnd, BitOr, BitXor, Div, FloorDiv, LShift, Mod, Mult, Pow, RShift, Sub,
slice, Ellipsis, ExtSlice, Index, Slice,
stmt, Assert, Assign, AugAssign, Break, ClassDef, Continue, Delete, Exec, Expr, For,
9 changes: 6 additions & 3 deletions pyparser/__init__.py
Original file line number Diff line number Diff line change
@@ -2,7 +2,7 @@
import sys, pyparser.source, pyparser.lexer, pyparser.parser

def parse(source, filename='<unknown>', mode='exec',
flags=[], version=sys.version_info[0:2]):
flags=[], version=None):
"""
Parse a string into an abstract syntax tree.
This is the replacement for the built-in :meth:`..ast.parse`.
@@ -17,18 +17,21 @@ def parse(source, filename='<unknown>', mode='exec',
:param flags: (list of string) Future flags.
Equivalent to ``from __future__ import <flags>``.
:param version: (2-tuple of int) Major and minor version of Python
syntax to recognize.
syntax to recognize, ``sys.version_info[0:2]`` by default.
:return: (:class:`ast.AST`) abstract syntax tree
:raise: :class:`diagnostic.DiagnosticException`
if the source code is not well-formed
"""
if version is None:
version = sys.version_info[0:2]

buffer = pyparser.source.Buffer(source, filename)

lexer = pyparser.lexer.Lexer(buffer, version)
if mode in ('single', 'eval'):
lexer.interactive = True

parser = pyparser.parser.Parser(lexer)
parser = pyparser.parser.Parser(lexer, version)
parser.add_flags(flags)

if mode == 'exec':
324 changes: 208 additions & 116 deletions pyparser/ast.py

Large diffs are not rendered by default.

181 changes: 138 additions & 43 deletions pyparser/parser.py
Original file line number Diff line number Diff line change
@@ -336,8 +336,11 @@ def rule(parser, begin_loc, node, end_loc):
# Collection nodes don't have loc yet. If a node has loc at this
# point, it means it's an expression passed in parentheses.
if node.loc is None and type(node) in [
ast.List, ast.Dict, ast.Tuple, ast.Repr,
ast.ListComp, ast.GeneratorExp,
ast.List, ast.ListComp,
ast.Dict, ast.DictComp,
ast.Set, ast.SetComp,
ast.GeneratorExp,
ast.Tuple, ast.Repr,
ast.Call, ast.Subscript,
ast.arguments]:
node.begin_loc, node.end_loc, node.loc = \
@@ -348,7 +351,9 @@ def rule(parser, begin_loc, node, end_loc):
class Parser:

# Generic LL parsing methods
def __init__(self, lexer):
def __init__(self, lexer, version):
self._init_version(version)

self.lexer = lexer
self._tokens = []
self._index = -1
@@ -388,7 +393,15 @@ def _accept(self, expected_kind):
return result
return unmatched

# Helper methods
# Python-specific methods
def _init_version(self, version):
if version == (2, 6):
self.with_stmt = self.with_stmt__26
self.atom_6 = self.atom_6__26
elif version == (2, 7):
self.with_stmt = self.with_stmt__27
self.atom_6 = self.atom_6__27

def _wrap_tuple(self, elts):
assert len(elts) > 0
if len(elts) > 1:
@@ -423,11 +436,11 @@ def _empty_arglist(self):
return ast.Call(args=[], keywords=[], starargs=None, kwargs=None,
star_loc=None, dstar_loc=None, loc=None)

# Python-specific methods
def add_flags(self, flags):
if 'print_function' in flags:
self.lexer.print_function = True

# Grammar
@action(Expect(Alt(Newline(),
Rule('simple_stmt'),
SeqN(0, Rule('compound_stmt'), Newline()))))
@@ -953,17 +966,40 @@ def try_stmt(self, try_loc, try_colon_loc, body, stmt):
return stmt

@action(Seq(Loc('with'), Rule('test'), Opt(Rule('with_var')), Loc(':'), Rule('suite')))
def with_stmt(self, with_loc, context, with_var, colon_loc, body):
"""with_stmt: 'with' test [ with_var ] ':' suite"""
as_loc = optional_vars = None
def with_stmt__26(self, with_loc, context, with_var, colon_loc, body):
"""(2.6) with_stmt: 'with' test [ with_var ] ':' suite"""
if with_var:
as_loc, optional_vars = with_var
return ast.With(context_expr=context, optional_vars=optional_vars, body=body,
keyword_loc=with_loc, as_loc=as_loc, colon_loc=colon_loc,
item = ast.withitem(context_expr=context, optional_vars=optional_vars,
as_loc=as_loc, loc=context.loc.join(optional_vars.loc))
else:
item = ast.withitem(context_expr=context, optional_vars=None,
as_loc=None, loc=context.loc)
return ast.With(items=[item], body=body,
keyword_loc=with_loc, colon_loc=colon_loc,
loc=with_loc.join(body[-1].loc))

with_var = Seq(Loc('as'), Rule('expr'))
"""with_var: 'as' expr"""
"""(2.6) with_var: 'as' expr"""

@action(Seq(Loc('with'), List(Rule('with_item'), ',', trailing=False), Loc(':'),
Rule('suite')))
def with_stmt__27(self, with_loc, items, colon_loc, body):
"""(2.7) with_stmt: 'with' with_item (',' with_item)* ':' suite"""
return ast.With(items=items, body=body,
keyword_loc=with_loc, colon_loc=colon_loc,
loc=with_loc.join(body[-1].loc))

@action(Seq(Rule('test'), Opt(Seq(Loc('as'), Rule('expr')))))
def with_item(self, context, as_opt):
"""(2.7) with_item: test ['as' expr]"""
if as_opt:
as_loc, optional_vars = as_opt
return ast.withitem(context_expr=context, optional_vars=optional_vars,
as_loc=as_loc, loc=context.loc.join(optional_vars.loc))
else:
return ast.withitem(context_expr=context, optional_vars=None,
as_loc=None, loc=context.loc)

@action(Seq(Loc('except'),
Opt(Seq(Rule('test'),
@@ -1134,20 +1170,30 @@ def atom_5(self, strings):
begin_loc=strings[0].begin_loc, end_loc=strings[-1].end_loc,
loc=strings[0].loc.join(strings[-1].loc))

atom = Alt(BeginEnd('(', Opt(Alt(Rule('yield_expr'), Rule('testlist_gexp'))), ')',
atom_6__26 = Rule('dictmaker')
atom_6__27 = Rule('dictorsetmaker')

atom = Alt(BeginEnd('(', Opt(Alt(Rule('yield_expr'), Rule('testlist_comp'))), ')',
empty=lambda: ast.Tuple(elts=[], ctx=None, loc=None)),
BeginEnd('[', Opt(Rule('listmaker')), ']',
empty=lambda: ast.List(elts=[], ctx=None, loc=None)),
BeginEnd('{', Opt(Rule('dictmaker')), '}',
BeginEnd('{', Opt(Rule('atom_6')), '}',
empty=lambda: ast.Dict(keys=[], values=[], colon_locs=[],
ctx=None, loc=None)),
loc=None)),
BeginEnd('`', atom_1, '`'),
atom_2, atom_3, atom_5)
"""atom: ('(' [yield_expr|testlist_gexp] ')' |
'[' [listmaker] ']' |
'{' [dictmaker] '}' |
'`' testlist1 '`' |
NAME | NUMBER | STRING+)"""
"""
(2.6) atom: ('(' [yield_expr|testlist_gexp] ')' |
'[' [listmaker] ']' |
'{' [dictmaker] '}' |
'`' testlist1 '`' |
NAME | NUMBER | STRING+)
(2.7) atom: ('(' [yield_expr|testlist_comp] ')' |
'[' [listmaker] ']' |
'{' [dictorsetmaker] '}' |
'`' testlist1 '`' |
NAME | NUMBER | STRING+)
"""

def list_gen_action(self, lhs, rhs):
if rhs is None: # (x)
@@ -1173,21 +1219,24 @@ def listmaker_2(self, elts):
(list_gen_action)
"""listmaker: test ( list_for | (',' test)* [','] )"""

@action(Rule('gen_for'))
def testlist_gexp_1(self, compose):
@action(Rule('comp_for'))
def testlist_comp_1(self, compose):
return ast.GeneratorExp(generators=compose([]), loc=None)

@action(List(Rule('test'), ',', trailing=True, leading=False))
def testlist_gexp_2(self, elts):
def testlist_comp_2(self, elts):
if elts == [] and not elts.trailing_comma:
return None
else:
return ast.Tuple(elts=elts, ctx=None, loc=None)

testlist_gexp = action(
Seq(Rule('test'), Alt(testlist_gexp_1, testlist_gexp_2))) \
testlist_comp = action(
Seq(Rule('test'), Alt(testlist_comp_1, testlist_comp_2))) \
(list_gen_action)
"""testlist_gexp: test ( gen_for | (',' test)* [','] )"""
"""
(2.6) testlist_gexp: test ( gen_for | (',' test)* [','] )
(2.7) testlist_comp: test ( comp_for | (',' test)* [','] )
"""

@action(Seq(Loc('lambda'), Opt(Rule('varargslist')), Loc(':'), Rule('test')))
def lambdef(self, lambda_loc, args_opt, colon_loc, body):
@@ -1274,12 +1323,46 @@ def testlist(self, exprs):

@action(List(Seq(Rule('test'), Loc(':'), Rule('test')), ',', trailing=True))
def dictmaker(self, elts):
"""dictmaker: test ':' test (',' test ':' test)* [',']"""
"""(2.6) dictmaker: test ':' test (',' test ':' test)* [',']"""
return ast.Dict(keys=list(map(lambda x: x[0], elts)),
values=list(map(lambda x: x[2], elts)),
colon_locs=list(map(lambda x: x[1], elts)),
loc=None)

dictorsetmaker_1 = Seq(Rule('test'), Loc(':'), Rule('test'))

@action(Seq(dictorsetmaker_1,
Alt(Rule('comp_for'),
List(dictorsetmaker_1, ',', leading=False, trailing=True))))
def dictorsetmaker_2(self, first, elts):
if isinstance(elts, commalist):
elts.insert(0, first)
return ast.Dict(keys=list(map(lambda x: x[0], elts)),
values=list(map(lambda x: x[2], elts)),
colon_locs=list(map(lambda x: x[1], elts)),
loc=None)
else:
return ast.DictComp(key=first[0], value=first[2], generators=elts([]),
colon_loc=first[1],
begin_loc=None, end_loc=None, loc=None)

@action(Seq(Rule('test'),
Alt(Rule('comp_for'),
List(Rule('test'), ',', leading=False, trailing=True))))
def dictorsetmaker_3(self, first, elts):
if isinstance(elts, commalist):
elts.insert(0, first)
return ast.Set(elts=elts, loc=None)
else:
return ast.SetComp(elt=first, generators=elts([]),
begin_loc=None, end_loc=None, loc=None)

dictorsetmaker = Alt(dictorsetmaker_2, dictorsetmaker_3)
"""
(2.7) dictorsetmaker: ( (test ':' test (comp_for | (',' test ':' test)* [','])) |
(test (comp_for | (',' test)* [','])) )
"""

@action(Seq(Loc('class'), Tok('ident'),
Opt(Seq(Loc('('), List(Rule('test'), ',', trailing=True), Loc(')'))),
Loc(':'), Rule('suite')))
@@ -1366,7 +1449,7 @@ def thunk(lhs):
arg_loc=lhs.loc, equals_loc=equals_loc)
return thunk

@action(Opt(Rule('gen_for')))
@action(Opt(Rule('comp_for')))
def argument_2(self, compose_opt):
def thunk(lhs):
if compose_opt:
@@ -1380,13 +1463,16 @@ def thunk(lhs):
@action(Seq(Rule('test'), Alt(argument_1, argument_2)))
def argument(self, lhs, thunk):
# This rule is reformulated to avoid exponential backtracking.
"""argument: test [gen_for] | test '=' test # Really [keyword '='] test"""
"""
(2.6) argument: test [gen_for] | test '=' test # Really [keyword '='] test
(2.7) argument: test [comp_for] | test '=' test
"""
return thunk(lhs)

list_iter = Alt(Rule("list_for"), Rule("list_if"))
"""list_iter: list_for | list_if"""

def list_gen_for_action(self, for_loc, target, in_loc, iter, next_opt):
def list_comp_for_action(self, for_loc, target, in_loc, iter, next_opt):
def compose(comprehensions):
comp = ast.comprehension(
target=target, iter=iter, ifs=[],
@@ -1398,7 +1484,7 @@ def compose(comprehensions):
return comprehensions
return compose

def list_gen_if_action(self, if_loc, cond, next_opt):
def list_comp_if_action(self, if_loc, cond, next_opt):
def compose(comprehensions):
comprehensions[-1].ifs.append(cond)
comprehensions[-1].if_locs.append(if_loc)
@@ -1412,27 +1498,36 @@ def compose(comprehensions):
list_for = action(
Seq(Loc('for'), Rule('exprlist'),
Loc('in'), Rule('testlist_safe'), Opt(Rule('list_iter')))) \
(list_gen_for_action)
(list_comp_for_action)
"""list_for: 'for' exprlist 'in' testlist_safe [list_iter]"""

list_if = action(
Seq(Loc('if'), Rule('old_test'), Opt(Rule('list_iter')))) \
(list_gen_if_action)
(list_comp_if_action)
"""list_if: 'if' old_test [list_iter]"""

gen_iter = Alt(Rule("gen_for"), Rule("gen_if"))
"""gen_iter: gen_for | gen_if"""
comp_iter = Alt(Rule("comp_for"), Rule("comp_if"))
"""
(2.6) gen_iter: gen_for | gen_if
(2.7) comp_iter: comp_for | comp_if
"""

gen_for = action(
comp_for = action(
Seq(Loc('for'), Rule('exprlist'),
Loc('in'), Rule('or_test'), Opt(Rule('gen_iter')))) \
(list_gen_for_action)
"""gen_for: 'for' exprlist 'in' or_test [gen_iter]"""

gen_if = action(
Seq(Loc('if'), Rule('old_test'), Opt(Rule('gen_iter')))) \
(list_gen_if_action)
"""gen_if: 'if' old_test [gen_iter]"""
Loc('in'), Rule('or_test'), Opt(Rule('comp_iter')))) \
(list_comp_for_action)
"""
(2.6) gen_for: 'for' exprlist 'in' or_test [gen_iter]
(2.7) comp_for: 'for' exprlist 'in' or_test [comp_iter]
"""

comp_if = action(
Seq(Loc('if'), Rule('old_test'), Opt(Rule('comp_iter')))) \
(list_comp_if_action)
"""
(2.6) gen_if: 'if' old_test [gen_iter]
(2.7) comp_if: 'if' old_test [comp_iter]
"""

testlist1 = action(List(Rule('test'), ',', trailing=False))(_wrap_tuple)
"""testlist1: test (',' test)*"""
Empty file removed pyparser/shim/__init__.py
Empty file.
31 changes: 0 additions & 31 deletions pyparser/shim/ast.py

This file was deleted.

173 changes: 122 additions & 51 deletions pyparser/test/test_parser.py
Original file line number Diff line number Diff line change
@@ -15,7 +15,9 @@ class ParserTestCase(unittest.TestCase):

maxDiff = None

def parser_for(self, code, version=(2, 6), interactive=False):
versions = [(2, 6), (2, 7)]

def parser_for(self, code, version, interactive=False):
code = code.replace("·", "\n")

self.source_buffer = source.Buffer(code)
@@ -28,18 +30,24 @@ def lexer_next(**args):
return token
self.lexer.next = lexer_next

self.parser = parser.Parser(self.lexer)
self.parser = parser.Parser(self.lexer, version)
return self.parser

def flatten_ast(self, node):
# Validate locs
# Validate locs and fields
for attr in node.__dict__:
if attr.endswith('_loc') or attr.endswith('_locs'):
if attr.endswith('loc') or attr.endswith('_locs'):
self.assertTrue(attr in node._locs,
"%s not in %s._locs" % (attr, repr(node)))
else:
self.assertTrue(attr in node._fields,
"%s not in %s._fields" % (attr, repr(node)))
for loc in node._locs:
self.assertTrue(loc in node.__dict__,
"%s not in %s._locs" % (loc, repr(node)))
"loc %s not in %s" % (loc, repr(node)))
for field in node._fields:
self.assertTrue(field in node.__dict__,
"field %s not in %s" % (field, repr(node)))

flat_node = { 'ty': unicode(type(node).__name__) }
for field in node._fields:
@@ -59,9 +67,9 @@ def flatten_python_ast(self, node):
continue

value = getattr(node, field)
if isinstance(value, ast.AST):
if isinstance(value, pyast.AST):
value = self.flatten_python_ast(value)
if isinstance(value, list) and len(value) > 0 and isinstance(value[0], ast.AST):
if isinstance(value, list) and len(value) > 0 and isinstance(value[0], pyast.AST):
value = list(map(self.flatten_python_ast, value))
flat_node[unicode(field)] = value
return flat_node
@@ -106,55 +114,61 @@ def match_loc(self, ast, matcher, root=lambda x: (0, x)):

matcher_pos = matcher_match.end(0)

def _assertParsesGen(self, expected_flat_ast, code,
loc_matcher="", ast_slicer=lambda x: (0, x),
validate_if=lambda: True):
ast = self.parser_for(code + "\n").file_input()
flat_ast = self.flatten_ast(ast)
python_ast = pyast.parse(code.replace("·", "\n") + "\n")
flat_python_ast = self.flatten_python_ast(python_ast)
self.assertEqual({'ty': 'Module', 'body': expected_flat_ast},
flat_ast)
if validate_if():
def assertParsesGen(self, expected_flat_ast, code,
loc_matcher="", ast_slicer=lambda x: (0, x),
only_if=lambda ver: True, validate_if=lambda: True):
for version in self.versions:
if not only_if(version):
continue

ast = self.parser_for(code + "\n", version).file_input()
flat_ast = self.flatten_ast(ast)
python_ast = pyast.parse(code.replace("·", "\n") + "\n")
flat_python_ast = self.flatten_python_ast(python_ast)
self.assertEqual({'ty': 'Module', 'body': expected_flat_ast},
flat_python_ast)
self.match_loc(ast, loc_matcher, ast_slicer)
flat_ast)
if validate_if():
self.assertEqual({'ty': 'Module', 'body': expected_flat_ast},
flat_python_ast)
self.match_loc(ast, loc_matcher, ast_slicer)

def assertParsesSuite(self, expected_flat_ast, code, loc_matcher="", **kwargs):
self._assertParsesGen(expected_flat_ast, code,
loc_matcher, lambda x: (0, x.body),
**kwargs)
self.assertParsesGen(expected_flat_ast, code,
loc_matcher, lambda x: (0, x.body),
**kwargs)

def assertParsesExpr(self, expected_flat_ast, code, loc_matcher="", **kwargs):
self._assertParsesGen([{'ty': 'Expr', 'value': expected_flat_ast}], code,
loc_matcher, lambda x: (0, x.body[0].value),
**kwargs)
self.assertParsesGen([{'ty': 'Expr', 'value': expected_flat_ast}], code,
loc_matcher, lambda x: (0, x.body[0].value),
**kwargs)

def assertParsesArgs(self, expected_flat_ast, code, loc_matcher="", **kwargs):
self._assertParsesGen([{'ty': 'Expr', 'value': {'ty': 'Lambda', 'body': self.ast_1,
'args': expected_flat_ast}}],
"lambda %s: 1" % code,
loc_matcher, lambda x: (7, x.body[0].value.args),
**kwargs)
self.assertParsesGen([{'ty': 'Expr', 'value': {'ty': 'Lambda', 'body': self.ast_1,
'args': expected_flat_ast}}],
"lambda %s: 1" % code,
loc_matcher, lambda x: (7, x.body[0].value.args),
**kwargs)

def assertParsesToplevel(self, expected_flat_ast, code,
mode="file_input", interactive=False):
ast = getattr(self.parser_for(code, interactive=interactive), mode)()
self.assertEqual(expected_flat_ast, self.flatten_ast(ast))
for version in self.versions:
ast = getattr(self.parser_for(code, version=version, interactive=interactive), mode)()
self.assertEqual(expected_flat_ast, self.flatten_ast(ast))

def assertDiagnoses(self, code, level, reason, args={}, loc_matcher=""):
try:
self.parser_for(code).file_input()
self.fail("Expected a diagnostic")
except diagnostic.DiagnosticException as e:
self.assertEqual(level, e.diagnostic.level)
self.assertEqual(reason, e.diagnostic.reason)
for key in args:
self.assertEqual(args[key], e.diagnostic.arguments[key],
"{{%s}}: \"%s\" != \"%s\"" %
(key, args[key], e.diagnostic.arguments[key]))
self.match_loc([e.diagnostic.location] + e.diagnostic.highlights,
loc_matcher)
for version in self.versions:
try:
self.parser_for(code, version).file_input()
self.fail("Expected a diagnostic")
except diagnostic.DiagnosticException as e:
self.assertEqual(level, e.diagnostic.level)
self.assertEqual(reason, e.diagnostic.reason)
for key in args:
self.assertEqual(args[key], e.diagnostic.arguments[key],
"{{%s}}: \"%s\" != \"%s\"" %
(key, args[key], e.diagnostic.arguments[key]))
self.match_loc([e.diagnostic.location] + e.diagnostic.highlights,
loc_matcher)

def assertDiagnosesUnexpected(self, code, err_token, loc_matcher=""):
self.assertDiagnoses(code,
@@ -492,6 +506,44 @@ def test_dict(self):
" ^ colon_locs.0"
"~~~~~~ loc")

def test_dict_comp(self):
self.assertParsesExpr(
{'ty': 'DictComp', 'key': self.ast_x, 'value': self.ast_y,
'generators': [{'ty': 'comprehension', 'target': self.ast_z,
'iter': self.ast_t, 'ifs': []}]},
"{x: y for z in t}",
"^ begin_loc"
" ^ end_loc"
" ^ colon_loc"
"~~~~~~~~~~~~~~~~~ loc",
only_if=lambda ver: ver >= (2, 7))

def test_set(self):
self.assertParsesExpr(
{'ty': 'Set', 'elts': [self.ast_1]},
"{1}",
"^ begin_loc"
" ^ end_loc"
"~~~ loc",
only_if=lambda ver: ver >= (2, 7))

self.assertParsesExpr(
{'ty': 'Set', 'elts': [self.ast_1, self.ast_2]},
"{1, 2}",
"~~~~~~ loc",
only_if=lambda ver: ver >= (2, 7))

def test_set_comp(self):
self.assertParsesExpr(
{'ty': 'SetComp', 'elt': self.ast_x,
'generators': [{'ty': 'comprehension', 'target': self.ast_y,
'iter': self.ast_z, 'ifs': []}]},
"{x for y in z}",
"^ begin_loc"
" ^ end_loc"
"~~~~~~~~~~~~~~ loc",
only_if=lambda ver: ver >= (2, 7))

def test_repr(self):
self.assertParsesExpr(
{'ty': 'Repr', 'value': self.ast_1},
@@ -1294,19 +1346,38 @@ def test_finally(self):

def test_with(self):
self.assertParsesSuite(
[{'ty': 'With', 'context_expr': self.ast_x, 'optional_vars': None,
'body': [self.ast_expr_1]}],
[{'ty': 'With', 'body': [self.ast_expr_1],
'items': [{'ty': 'withitem', 'context_expr': self.ast_x, 'optional_vars': None}]}],
"with x:· 1",
"~~~~ 0.keyword_loc"
" ~ 0.items.0.loc"
" ^ 0.colon_loc"
"~~~~~~~~~~~ 0.loc")
"~~~~~~~~~~~ 0.loc",
validate_if=lambda: sys.version_info >= (3, 0))

self.assertParsesSuite(
[{'ty': 'With', 'context_expr': self.ast_x, 'optional_vars': self.ast_y,
'body': [self.ast_expr_1]}],
[{'ty': 'With', 'body': [self.ast_expr_1],
'items': [{'ty': 'withitem', 'context_expr': self.ast_x,
'optional_vars': self.ast_y}]}],
"with x as y:· 1",
" ~~ 0.as_loc"
"~~~~~~~~~~~~~~~~ 0.loc")
" ~~ 0.items.0.as_loc"
" ~~~~~~ 0.items.0.loc"
"~~~~~~~~~~~~~~~~ 0.loc",
validate_if=lambda: sys.version_info >= (3, 0))

self.assertParsesSuite(
[{'ty': 'With', 'body': [self.ast_expr_1],
'items': [{'ty': 'withitem', 'context_expr': self.ast_x,
'optional_vars': self.ast_y},
{'ty': 'withitem', 'context_expr': self.ast_t,
'optional_vars': None}]}],
"with x as y, t:· 1",
" ~~ 0.items.0.as_loc"
" ~~~~~~ 0.items.0.loc"
" ~ 0.items.1.loc"
"~~~~~~~~~~~~~~~~~~~ 0.loc",
only_if=lambda ver: ver >= (2, 7),
validate_if=lambda: sys.version_info >= (3, 0))

def test_class(self):
self.assertParsesSuite(