Python's lexer is exposed as tokenize.tokenize(). The syntactic analyser is ast.parse(), but I don't think it will work on a line fragment.
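A quick check bears that out (the exact SyntaxError message varies between Python versions; this is roughly what 3.5 prints):

>>> import ast
>>> ast.parse("def some_func(x, y")
Traceback (most recent call last):
  ...
SyntaxError: unexpected EOF while parsing

tokenize, on the other hand, produces tokens right up to the point where the fragment runs out: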
>>> import io
>>> from tokenize import tokenize
>>> data = "def some_func(x, y"
>>>
>>> for token in tokenize(io.BytesIO(data.encode()).readline):
...     print(token)
...
TokenInfo(type=59 (ENCODING), string='utf-8', start=(0, 0), end=(0, 0), line='')
TokenInfo(type=1 (NAME), string='def', start=(1, 0), end=(1, 3), line='def some_func(x, y')
TokenInfo(type=1 (NAME), string='some_func', start=(1, 4), end=(1, 13), line='def some_func(x, y')
TokenInfo(type=53 (OP), string='(', start=(1, 13), end=(1, 14), line='def some_func(x, y')
TokenInfo(type=1 (NAME), string='x', start=(1, 14), end=(1, 15), line='def some_func(x, y')
TokenInfo(type=53 (OP), string=',', start=(1, 15), end=(1, 16), line='def some_func(x, y')
TokenInfo(type=1 (NAME), string='y', start=(1, 17), end=(1, 18), line='def some_func(x, y')
Traceback (most recent call last):
File "<stdin>", line 1, in <module>
File "/usr/lib/python3.5/tokenize.py", line 597, in _tokenize
raise TokenError("EOF in multi-line statement", (lnum, 0))
tokenize.TokenError: ('EOF in multi-line statement', (2, 0))
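If the goal is just to recover the tokens of an incomplete line, one approach is to catch the TokenError and keep whatever tokens were already produced. This is a sketch, not a stdlib facility: tokens_of_fragment is a name of my own, and it continues the session above (io and data are already defined):

>>> from tokenize import TokenError
>>> def tokens_of_fragment(source):
...     # Tokenize as far as possible; an unterminated statement
...     # raises TokenError at EOF, so return what we already have.
...     tokens = []
...     try:
...         for token in tokenize(io.BytesIO(source.encode()).readline):
...             tokens.append(token)
...     except TokenError:
...         pass
...     return tokens
...
>>> [t.string for t in tokens_of_fragment(data)]
['utf-8', 'def', 'some_func', '(', 'x', ',', 'y']

The ENCODING token ('utf-8') comes along for free, as in the session above; filter on token type if it gets in the way.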