Drop support for the token_prefix option
No longer accept token_prefix as an option, in order to enforce using RST
field lists as the token format.
elyezer committed Nov 18, 2016
1 parent b65d14e commit 68ec79b
Showing 4 changed files with 25 additions and 56 deletions.
docs/index.rst: 8 changes (1 addition & 7 deletions)

@@ -50,13 +50,7 @@ minimum-tokens
 .. note::

     To help test case parsing, make sure that each test case docstring has the
-    tokens in the following format ``{token_prefix}token{token_suffix}``,
-    where:
-
-    token_prefix
-        This is configurable and by default, it is ``:``.
-    token_suffix
-        This is not configurable and should always be ``:``.
+    tokens in the following format ``:token:``.

 Sample Test Case
 ++++++++++++++++
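
For reference, a hedged example of a test case docstring in the ``:token:``
field-list format the documentation now describes (the token names are
illustrative, not mandated by this commit):

    def test_positive_login(self):
        """Check that a valid user can log in.

        :Setup: Create a user account.
        :Steps: Log in with valid credentials.
        :ExpectedResults: The dashboard is displayed.
        """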
testimony/__init__.py: 1 change (0 additions & 1 deletion)

@@ -83,7 +83,6 @@ def __init__(self, function_def, parent_class=None, testmodule=None):
         self.parser = DocstringParser(
             SETTINGS.get('tokens'),
             SETTINGS.get('minimum_tokens'),
-            SETTINGS.get('token_prefix', ':'),
         )
         self._parse_docstring()

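With the prefix argument gone, the parser is built from the token lists
alone. A minimal sketch, assuming testimony is importable and using lowercase
token names to match the lowercased field names extracted in parser.py:

    from testimony.parser import DocstringParser

    # Token names are illustrative; parse() lowercases field names before
    # matching, so lowercase entries are the safe choice here.
    parser = DocstringParser(
        tokens=['setup', 'steps', 'expectedresults'],
        minimum_tokens=['expectedresults'],
    )
    # Raises ValueError if minimum_tokens is not a subset of tokens.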
testimony/cli.py: 17 changes (1 addition & 16 deletions)

@@ -5,36 +5,21 @@
 from testimony import SETTINGS, constants, main


-def _validate_token_prefix(ctx, param, value):
-    """Ensure single character for token prefix."""
-    if len(value) != 1:
-        raise click.BadParameter('token prefix should be a single character.')
-    else:
-        return value
-
-
 @click.command()
 @click.option('-j', '--json', help='JSON output', is_flag=True)
 @click.option('-n', '--nocolor', default=False, help='Color output',
               is_flag=True)
 @click.option('--tokens', help='Comma separated list of expected tokens')
 @click.option(
     '--minimum-tokens', help='Comma separated list of minimum expected tokens')
-@click.option(
-    '--token-prefix',
-    callback=_validate_token_prefix,
-    default=':',
-    help='Single character token prefix'
-)
 @click.argument('report', type=click.Choice(constants.REPORT_TAGS))
 @click.argument('path', nargs=-1, type=click.Path(exists=True))
 def testimony(
-        json, nocolor, tokens, minimum_tokens, token_prefix, report, path):
+        json, nocolor, tokens, minimum_tokens, report, path):
     """Inspect and report on the Python test cases."""
     if tokens:
         SETTINGS['tokens'] = [token.strip() for token in tokens.split(',')]
     if minimum_tokens:
         SETTINGS['minimum_tokens'] = [
             token.strip() for token in minimum_tokens.split(',')]
-    SETTINGS['token_prefix'] = token_prefix
     main(report, path, json, nocolor)
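
A sketch of exercising the slimmed-down command with click's test runner; the
report tag and path below are placeholders (valid report names come from
constants.REPORT_TAGS, and the path must exist on disk):

    from click.testing import CliRunner

    from testimony.cli import testimony

    runner = CliRunner()
    result = runner.invoke(testimony, [
        '--tokens', 'setup,steps,expectedresults',
        '--minimum-tokens', 'expectedresults',
        'print',   # placeholder report tag, must be one of REPORT_TAGS
        'tests/',  # placeholder path, must exist on disk
    ])
    print(result.output)

    # The removed option is now rejected by click with a usage error.
    result = runner.invoke(testimony, ['--token-prefix', ';', 'print', 'tests/'])
    assert result.exit_code != 0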
testimony/parser.py: 55 changes (23 additions & 32 deletions)

@@ -1,6 +1,5 @@
 # coding=utf-8
 """Docstring parser utilities for Testimony."""
-import re
 from docutils.core import publish_string
 from xml.etree import ElementTree

@@ -10,7 +9,7 @@
 class DocstringParser(object):
     """Parse docstring extracting tokens."""

-    def __init__(self, tokens=None, minimum_tokens=None, prefix=':'):
+    def __init__(self, tokens=None, minimum_tokens=None):
         """Initialize the parser with expected tokens and the minimum set."""
         if tokens is None:
             self.tokens = DEFAULT_TOKENS
@@ -20,13 +19,8 @@ def __init__(self, tokens=None, minimum_tokens=None, prefix=':'):
             self.minimum_tokens = DEFAULT_MINIMUM_TOKENS
         else:
             self.minimum_tokens = minimum_tokens
-        self.token_prefix = prefix
         self.minimum_tokens = set(self.minimum_tokens)
         self.tokens = set(self.tokens)
-        self.token_regex = re.compile(
-            r'^{0}(\w+):\s+([^{0}]+)(\n|$)'.format(self.token_prefix),
-            flags=re.MULTILINE
-        )
         if not self.minimum_tokens.issubset(self.tokens):
             raise ValueError('tokens should contain minimum_tokens')

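For reference on what is being dropped: the removed regex fallback matched
tokens written with a custom single-character prefix. A sketch of the old
matching behavior, using '@' as an illustrative prefix:

    import re

    prefix = '@'  # custom prefixes like this are no longer supported
    token_regex = re.compile(
        r'^{0}(\w+):\s+([^{0}]+)(\n|$)'.format(prefix), flags=re.MULTILINE)

    match = token_regex.search('@feature: login')
    print(match.group(1), match.group(2))  # feature login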
@@ -58,31 +52,28 @@ def parse(self, docstring=None):
         valid_tokens = {}
         invalid_tokens = {}

-        if self.token_prefix == ':':
-            docstring_xml = publish_string(docstring, writer_name='xml')
-            root = ElementTree.fromstring(docstring_xml)
-            tokens = root.findall('./field_list/field')
-            for token in tokens:
-                token_name = token.find('./field_name').text.lower()
-                value_el = token.find('./field_body/')
-                if value_el is None:
-                    invalid_tokens[token_name] = ''
-                    continue
-                if value_el.tag == 'paragraph':
-                    value = value_el.text
-                if value_el.tag == 'enumerated_list':
-                    value_lst = map(lambda elem: elem.text,
-                                    value_el.findall('./list_item/paragraph'))
-                    list_enum = list(enumerate(value_lst, start=1))
-                    steps = map(lambda val: '{}. {}'.format(val[0], val[1]),
-                                list_enum)
-                    value = '\n'.join(steps)
-                tokens_dict[token_name] = value
-        else:
-            for match in self.token_regex.finditer(docstring):
-                token = match.group(1).strip().lower()
-                value = match.group(2).strip()
-                tokens_dict[token] = value
+        # Parse the docstring with the docutils RST parser and output the
+        # result as XML, this eases the process of getting the tokens
+        # information.
+        docstring_xml = publish_string(docstring, writer_name='xml')
+        root = ElementTree.fromstring(docstring_xml)
+        tokens = root.findall('./field_list/field')
+        for token in tokens:
+            token_name = token.find('./field_name').text.lower()
+            value_el = token.find('./field_body/')
+            if value_el is None:
+                invalid_tokens[token_name] = ''
+                continue
+            if value_el.tag == 'paragraph':
+                value = value_el.text
+            if value_el.tag == 'enumerated_list':
+                value_lst = map(lambda elem: elem.text,
+                                value_el.findall('./list_item/paragraph'))
+                list_enum = list(enumerate(value_lst, start=1))
+                steps = map(lambda val: '{}. {}'.format(val[0], val[1]),
+                            list_enum)
+                value = '\n'.join(steps)
+            tokens_dict[token_name] = value

         for token, value in tokens_dict.items():
             if token in self.tokens:
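
For reference, a standalone sketch of the new RST-based extraction, mirroring
the parse() code above: docutils renders the docstring as an XML tree and
ElementTree walks the field list. The sample docstring and token names are
illustrative:

    from xml.etree import ElementTree

    from docutils.core import publish_string

    docstring = """Check that a valid user can log in.

    :Setup: Create a user account.
    :Steps:
        1. Open the login page.
        2. Submit valid credentials.
    :ExpectedResults: The dashboard is displayed.
    """

    root = ElementTree.fromstring(publish_string(docstring, writer_name='xml'))
    for field in root.findall('./field_list/field'):
        name = field.find('./field_name').text.lower()
        body = field.find('./field_body/')
        if body is None:
            continue  # an empty field body is reported as an invalid token
        if body.tag == 'paragraph':
            value = body.text
        elif body.tag == 'enumerated_list':
            # Number the steps just like parse() does above.
            items = [item.text for item in body.findall('./list_item/paragraph')]
            value = '\n'.join('{}. {}'.format(i, text)
                              for i, text in enumerate(items, start=1))
        print('{}: {}'.format(name, value))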
