h);case\"IS_NOT\":return Sk.ffi.remapToPy(i!=h);default:throw new Sk.builtins.ValueError(\"Unknown comparison\");}}),f};","src/lib/pdb.py":"raise NotImplementedError(\"pdb is not yet implemented in Skulpt\")\n","src/lib/pedal/assertions/assertions.py":"import string\nimport re\n\nfrom pedal.report.imperative import MAIN_REPORT\nfrom pedal.sandbox.result import SandboxResult\nfrom pedal.sandbox.exceptions import SandboxException\nfrom pedal.sandbox.sandbox import DataSandbox\nfrom pedal.assertions.setup import _setup_assertions, AssertionException\n\n# TODO: Allow bundling of assertions to make a table\n\niterable = lambda obj: hasattr(obj,'__iter__') or hasattr(obj,'__getitem__')\n\n_MAX_LENGTH = 80\n\ndef _escape_curly_braces(result):\n return result.replace(\"{\", \"{{\").replace(\"}\", \"}}\")\n\ndef safe_repr(obj, short=False):\n try:\n result = repr(obj)\n except Exception:\n result = object.__repr__(obj)\n if short and len(result) >= _MAX_LENGTH:\n result = result[:_MAX_LENGTH] + ' [truncated]...' \n result = result\n return result\n\n\ntry:\n punctuation_table = str.maketrans(string.punctuation, ' ' * len(string.punctuation))\nexcept AttributeError:\n punctuation_table = None\n\nif punctuation_table is None:\n def strip_punctuation(a_string):\n return ''.join(ch for ch in a_string if ch not in set(string.punctuation))\nelse:\n def strip_punctuation(a_string):\n return a_string.translate(punctuation_table)\n\n\ndef _normalize_string(a_string, numeric_endings=False):\n # Lower case\n a_string = a_string.lower()\n # Remove trailing decimals (TODO: How awful!)\n if numeric_endings:\n a_string = re.sub(r\"(\\s*[0-9]+)\\.[0-9]+(\\s*)\", r\"\\1\\2\", a_string)\n # Remove punctuation\n a_string = strip_punctuation(a_string)\n # Split lines\n lines = a_string.split(\"\\n\")\n normalized = [[piece\n for piece in line.split()]\n for line in lines]\n normalized = [[piece for piece in line if piece]\n for line in normalized\n if line]\n return sorted(normalized)\n\n\ndef equality_test(actual, expected, _exact_strings, _delta, _test_output):\n # Float comparison\n if (isinstance(expected, float) and\n isinstance(actual, (float, int)) and\n abs(actual - expected) < _delta):\n return True\n # Exact Comparison\n if actual == expected:\n return True\n # Inexact string comparison\n if (_exact_strings and isinstance(expected, str) and\n isinstance(actual, str) and\n _normalize_string(actual) == _normalize_string(expected)):\n return True\n # Output comparison\n if _test_output:\n # Inexact output comparison\n normalized_actual = [_normalize_string(line) for line in actual]\n if (isinstance(expected, str) and\n _normalize_string(expected) in normalized_actual):\n return True\n # Exact output comparison\n normalized_expected = [_normalize_string(line) for line in expected]\n if (isinstance(expected, list) and\n normalized_expected == normalized_actual):\n return True\n # Else\n return False\n\n\n# Unittest Asserts\nDELTA = .001\n\n\ndef _fail(code_message, actual_message, expected_message,\n show_expected_value, modify_right, *values):\n normal_values = []\n sandboxed_values = []\n sandboxed_results = []\n if modify_right and values:\n values = values[:-1] + (modify_right(values[-1]), )\n for value in values:\n if is_sandbox_result(value):\n sandboxed_results.append(value)\n value = value._actual_value\n sandboxed_values.append(safe_repr(value))\n else:\n normal_values.append(safe_repr(value))\n if sandboxed_results:\n code_message = _build_context(sandboxed_results, actual_message,\n expected_message, 
show_expected_value)\n return AssertionException(code_message.format(*(sandboxed_values + normal_values)))\n\n\ndef _build_result_from_target(target, index, quantity):\n if target == \"_\":\n if quantity == 1:\n return \"the result\"\n elif index == 0:\n return \"the first result\"\n else:\n return \"the second result\"\n return \"\"+target+\"\"\n\ndef _build_context(sandboxed_results, actual_message, expected_message,\n show_expected_value):\n context = []\n calls = []\n inputs = []\n outputs = []\n targets = []\n for result in sandboxed_results:\n # Look up info\n call_id = result._actual_call_id\n sandbox = result._actual_sandbox\n outputs.extend(sandbox.output_contexts[call_id])\n calls.extend(sandbox.call_contexts[call_id])\n inputs.extend(sandbox.input_contexts[call_id])\n targets.append(sandbox.target_contexts[call_id])\n # Actual rendering of text\n if calls:\n calls = [_escape_curly_braces(str(call)) for call in calls]\n context.append(\"I ran:\"+ \"\\n\".join(calls)+ \"\")\n if inputs:\n inputs = [_escape_curly_braces(str(inp)) for inp in inputs]\n context.append(\"I entered as input:\"+ \"\\n\".join(inputs)+ \"\")\n actual_message += \":{}\"\n for i, target in enumerate(targets):\n named_target = _build_result_from_target(target, i, len(targets))\n if target == '_':\n context.append(named_target.capitalize() + \" \"+actual_message)\n else:\n context.append(\"The value of \"+named_target+\" \"+actual_message)\n expected_context = \"But I expected \"\n if len(targets) == 2:\n expected_context += _build_result_from_target(targets[0], 0, 2)\n expected_context += \" \" +expected_message + \" \"\n expected_context += _build_result_from_target(targets[1], 1, 2)\n else:\n expected_context += _build_result_from_target(targets[0], 0, 1)\n expected_context += \" \" + expected_message\n if show_expected_value:\n expected_context += \":{}
\"\n context.append(expected_context)\n return \"\\n\".join(context)\n\n\ndef is_sandbox_result(value):\n if hasattr(value, \"__actual_class__\"):\n if value.__actual_class__ == SandboxResult:\n return True\n return False\n\n\ndef _basic_assertion(left, right, operator, code_comparison_message,\n hc_message, hc_message_past, message, report, contextualize,\n show_expected_value=True, modify_right=None):\n if report is None:\n report = MAIN_REPORT\n _setup_assertions(report)\n context = \"\"\n if message:\n message = \"\\n\"+message\n else:\n message = \"\"\n # TODO: Handle right-side sandbox result\n #if is_sandbox_result(right):\n # right = right._actual_value\n if isinstance(left, Exception):\n return False\n if isinstance(right, Exception):\n return False\n if not operator(left, right):\n failure = _fail(code_comparison_message, hc_message, hc_message_past,\n show_expected_value, modify_right, left, right)\n report['assertions']['collected'].append(failure)\n report.attach('Instructor Test', category='student', tool='Assertions',\n mistake={'message': \"Student code failed instructor test.
\\n\"+\n context+str(failure)+message})\n report['assertions']['failures'] += 1\n if report['assertions']['exceptions']:\n raise failure\n else:\n return False\n return True\n\n\nPRE_VAL = \"\"\n\n\ndef assertEqual(left, right, score=None, message=None, report=None,\n contextualize=True, exact=False, compare_lengths=None):\n if compare_lengths is None:\n compare_lengths = (iterable(left) and isinstance(right, (int, float)))\n if _basic_assertion(left, right,\n lambda l, r:\n equality_test(len(l), r, False, DELTA, False) if\n compare_lengths else\n equality_test(l, r, False, DELTA, False),\n \"len({}) != {}\" if compare_lengths else \"{} != {}\",\n \"was\"+PRE_VAL,\n \"to have its length equal to\" \n if compare_lengths else \"to be equal to\",\n message, report, contextualize):\n if report is None:\n report = MAIN_REPORT\n report.give_partial(score)\n return True\n return False\n\n\nassert_equal = assertEqual\n\n\ndef assertNotEqual(left, right, score=None, message=None, report=None,\n contextualize=True, exact=False):\n if _basic_assertion(left, right,\n lambda l, r: not equality_test(l, r, False, DELTA, False),\n \"{} == {}\",\n \"was\"+PRE_VAL,\n \"to not be equal to\",\n message, report, contextualize):\n if report is None:\n report = MAIN_REPORT\n report.give_partial(score)\n return True\n return False\n\n\ndef assertTrue(something, score=None, message=None, report=None,\n contextualize=True):\n if _basic_assertion(something, True,\n lambda l, r: bool(l),\n \"{} is true\",\n \"was false\"+PRE_VAL,\n \"to be true\",\n message, report, contextualize,\n show_expected_value=False):\n if report is None:\n report = MAIN_REPORT\n report.give_partial(score)\n return True\n return False\n\n\ndef assertFalse(something, score=None, message=None, report=None,\n contextualize=True):\n if _basic_assertion(something, False,\n lambda l, r: not bool(l),\n \"{} is false\",\n \"was true\"+PRE_VAL,\n \"to be false\",\n message, report, contextualize,\n show_expected_value=False):\n if report is None:\n report = MAIN_REPORT\n report.give_partial(score)\n return True\n return False\n\n\ndef assertIs(left, right, score=None, message=None):\n pass\n\n\ndef assertIsNot(left, right, score=None, message=None):\n pass\n\ndef _actually_is_none(l, r):\n if is_sandbox_result(l):\n return l._actual_value is None\n return l is None\n\ndef assertIsNone(something, score=None, message=None, report=None,\n contextualize=True):\n if _basic_assertion(something, None,\n _actually_is_none,\n \"{} is none\",\n \"was\"+PRE_VAL,\n \"to be none\",\n message, report, contextualize,\n show_expected_value=False):\n if report is None:\n report = MAIN_REPORT\n report.give_partial(score)\n return True\n return False\n\ndef _actually_is_not_none(l, r):\n if is_sandbox_result(l):\n return l._actual_value is not None\n return l is not None\n\ndef assertIsNotNone(something, score=None, message=None, report=None,\n contextualize=True):\n if _basic_assertion(something, None,\n _actually_is_not_none,\n \"{} is not none\",\n \"was\"+PRE_VAL,\n \"to not be none\",\n message, report, contextualize,\n show_expected_value=False):\n if report is None:\n report = MAIN_REPORT\n report.give_partial(score)\n return True\n return False\n\n\ndef assertIn(needle, haystack, score=None, message=None, report=None,\n contextualize=True):\n expected_message = \"to be in\"\n if not is_sandbox_result(needle) and is_sandbox_result(haystack):\n expected_message = \"to contain\"\n if _basic_assertion(needle, haystack,\n lambda n, h: n in h,\n \"{} not in {}\",\n 
\"was\"+PRE_VAL,\n expected_message,\n message, report, contextualize):\n if report is None:\n report = MAIN_REPORT\n report.give_partial(score)\n return True\n return False\n\n\ndef assertNotIn(needle, haystack, score=None, message=None, report=None,\n contextualize=True):\n expected_message = \"to not be in\"\n if not is_sandbox_result(needle) and is_sandbox_result(haystack):\n expected_message = \"to not contain\"\n if _basic_assertion(needle, haystack,\n lambda n, h: n not in h,\n \"{} in {}\",\n \"was\"+PRE_VAL,\n expected_message,\n message, report, contextualize):\n if report is None:\n report = MAIN_REPORT\n report.give_partial(score)\n return True\n return False\n\ndef _humanize_types(types):\n if isinstance(types, tuple):\n return ', '.join([t.__name__ for t in types])\n return types.__name__\n\ndef assertIsInstance(value, types, score=None, message=None, report=None,\n contextualize=True):\n if _basic_assertion(value, types,\n lambda v, t: isinstance(v, t),\n \"isinstance({}, {})\",\n \"was\"+PRE_VAL,\n \"to be of type\",\n message, report, contextualize,\n modify_right=_humanize_types):\n if report is None:\n report = MAIN_REPORT\n report.give_partial(score)\n return True\n return False\n\n\ndef assertNotIsInstance(value, types):\n pass\n\n\ndef assertRaises(exception):\n pass\n\n\ndef assertRaisesRegexp(exception):\n pass\n\n\ndef assertAlmostEqual(left, right):\n pass\n\n\ndef assertNotAlmostEqual(left, right):\n pass\n\n\ndef assertGreater(left, right, score=None, message=None, report=None,\n contextualize=True, compare_lengths=None):\n if compare_lengths is None:\n compare_lengths = (iterable(left) and isinstance(right, (int, float)))\n if _basic_assertion(left, right,\n lambda l, r:\n len(l) > r if\n compare_lengths else\n l > r,\n \"len({}) <= {}\" if compare_lengths else \"{} <= {}\",\n \"was\"+PRE_VAL,\n \"to have its length greater than\" \n if compare_lengths else\n \"to be greater than\",\n message, report, contextualize):\n if report is None:\n report = MAIN_REPORT\n report.give_partial(score)\n return True\n return False\n\n\ndef assertGreaterEqual(left, right, score=None, message=None, report=None,\n contextualize=True, compare_lengths=None):\n if compare_lengths is None:\n compare_lengths = (iterable(left) and isinstance(right, (int, float)))\n if _basic_assertion(left, right,\n lambda l, r:\n len(l) >= r if\n compare_lengths else\n l >= r,\n \"len({}) < {}\" if compare_lengths else \"{} < {}\",\n \"was\"+PRE_VAL,\n \"to have its length greater than or equal to\" if compare_lengths else\n \"to be greater than or equal to\",\n message, report, contextualize):\n if report is None:\n report = MAIN_REPORT\n report.give_partial(score)\n return True\n return False\n\n\ndef assertLess(left, right, score=None, message=None, report=None,\n contextualize=True, compare_lengths=None):\n if compare_lengths is None:\n compare_lengths = (iterable(left) and isinstance(right, (int, float)))\n if _basic_assertion(left, right,\n lambda l, r:\n len(l) < r if\n compare_lengths else\n l < r,\n \"len({}) >= {}\" if compare_lengths else \"{} >= {}\",\n \"was\"+PRE_VAL,\n \"to have its length less than\" \n if compare_lengths else\n \"to be less than\",\n message, report, contextualize):\n if report is None:\n report = MAIN_REPORT\n report.give_partial(score)\n return True\n return False\n\n\ndef assertLessEqual(left, right, score=None, message=None, report=None,\n contextualize=True, compare_lengths=None):\n if compare_lengths is None:\n compare_lengths = (iterable(left) and 
isinstance(right, (int, float)))\n if _basic_assertion(left, right,\n lambda l, r:\n len(l) <= r if\n compare_lengths else\n l <= r,\n \"len({}) > {}\" if compare_lengths else \"{} > {}\",\n \"was\"+PRE_VAL,\n \"to have its length less than or equal to\" if compare_lengths else\n \"to be less than or equal to\",\n message, report, contextualize):\n if report is None:\n report = MAIN_REPORT\n report.give_partial(score)\n return True\n return False\n\n\ndef assertRegexpMatches(text, pattern):\n pass\n\n\ndef assertNotRegexpMatches(text, pattern):\n pass\n\n\ndef assertItemsEqual(left, right):\n pass\n\n\ndef assertDictContainsSubset(left, right):\n pass\n\n\ndef assertMultiLineEqual(left, right):\n pass\n\n\ndef assertSequenceEqual(left, right):\n pass\n\n\n# Speciality Asserts\ndef assertPrints(result, expected_output, args=None, returns=None,\n score=None, message=None, report=None,\n contextualize=True, exact=False):\n if not isinstance(result, SandboxResult):\n return False\n raise TypeError(\"You must pass in a SandboxResult (e.g., using `call`) to assertPrints\")\n if report is None:\n report = MAIN_REPORT\n _setup_assertions(report)\n call_id = result._actual_call_id\n sandbox = result._actual_sandbox\n calls = sandbox.call_contexts[call_id]\n inputs = sandbox.input_contexts[call_id]\n actual_output = sandbox.output_contexts[call_id]\n if not equality_test(actual_output, expected_output, exact, DELTA, True):\n context= []\n if calls:\n context.append(\"I ran:\"+\n \"\\n\".join(map(str, calls))+\n \"
\")\n if inputs:\n context.append(\"I entered as input:\"+\n \"\\n\".join(map(str, inputs))+\n \"\")\n if actual_output:\n context.append(\"The function printed:\"+\n \"\\n\".join(map(str, actual_output))+\n \"\")\n else:\n context.append(\"The function printed nothing.\")\n context.append(\"But I expected the output:\"+ \"\\n\".join(map(str, expected_output))+ \"\")\n failure = AssertionException(\"\\n\".join(context))\n report['assertions']['collected'].append(failure)\n report.attach('Instructor Test', category='student', tool='Assertions',\n mistake={'message': \"Student code failed instructor test.
\\n\"+\n str(failure)})\n report['assertions']['failures'] += 1\n if report['assertions']['exceptions']:\n raise failure\n else:\n return False\n report.give_partial(score)\n return True\n\ndef assertHasFunction(obj, function, args=None, returns=None,\n score=None, message=None, report=None,\n contextualize=True, exact=False):\n # If object is a sandbox, will check the .data[variable] attribute\n # Otherwise, check it directly\n if isinstance(obj, DataSandbox):\n comparison = lambda o, f: f in o.data\n else:\n def comparison(o, f):\n try:\n return f in o\n except:\n return hasattr(o, f)\n if not _basic_assertion(obj, function,\n comparison,\n \"Could not find function {}{}\",\n \"was\"+PRE_VAL,\n \"to have the function\",\n message, report, contextualize):\n return False\n if isinstance(obj, DataSandbox):\n student_function = obj.data[function]\n else:\n try:\n student_function = obj[function]\n except:\n student_function = getattr(obj, function)\n if _basic_assertion(student_function, function,\n lambda l, r: callable(l),\n \"The value {} is in the variable {}, and that value is not a callable function.\",\n \"was callable\"+PRE_VAL,\n \"to be callable\",\n message, report, contextualize,\n show_expected_value=False):\n if report is None:\n report = MAIN_REPORT\n report.give_partial(score)\n return True\n return False\n\ndef assertHasClass(sandbox, class_name, attrs=None):\n pass\n\n\ndef assertHas(obj, variable, types=None, value=None, score=None,\n message=None, report=None, contextualize=True):\n # If object is a sandbox, will check the .data[variable] attribute\n # Otherwise, check it directly\n if isinstance(obj, DataSandbox):\n comparison = lambda o, v: v in o.data\n else:\n comparison = lambda o, v: v in hasattr(o, v)\n if not _basic_assertion(obj, variable,\n comparison,\n \"Could not find variable {}{}\",\n \"was\"+PRE_VAL,\n \"to have the variable\",\n message, report, contextualize):\n return False\n if isinstance(obj, DataSandbox):\n student_variable = obj.data[variable]\n else:\n student_variable = getattr(obj, variable)\n if types is not None:\n if not _basic_assertion(student_variable, types,\n lambda v, t: isinstance(v, t),\n \"isinstance({}, {})\",\n \"was\"+PRE_VAL,\n \"to be of type\",\n message, report, contextualize,\n modify_right=_humanize_types):\n return False\n if value is not None:\n if not _basic_assertion(student_variable, value,\n lambda l, r: equality_test(l, r, False, DELTA, False),\n \"{} != {}\",\n \"was\"+PRE_VAL,\n \"to be equal to\",\n message, report, contextualize,\n show_expected_value=False):\n return False\n if report is None:\n report = MAIN_REPORT\n report.give_partial(score)\n return True\n\ndef assertGenerally(expression, score=None, message=None, report=None,\n contextualize=True):\n if report is None:\n report = MAIN_REPORT\n _setup_assertions(report)\n if expression:\n report.give_partial(score)\n return True\n else:\n report['assertions']['failures'] += 1\n if report['assertions']['exceptions']:\n raise AssertionException(\"General assertion\")\n else:\n return False\n\n# Allow addition of new assertions\n# e.g., assertGraphType, assertGraphValues\n","src/lib/pedal/assertions/organizers.py":"'''\n\nSections are a way to separate the pieces of a file such that the pieces do not\ninterfere with each other.\n\nPhases are a way to chunk a collection of functions together. If one of these\nfunctions fails, the other functions in the phase will continue to be evaluated.\nHowever, that phase will still have failed. 
You can establish that one phase\ncomes before or after another phase; if a precondition phase fails, then the\nsubsequent phase will not run.\n\nExample:\n Students are working on a text adventure game and have to implement a\n function named create_world(). The grading for portion of the assignment\n has three phases:\n 'create_world_exists' which confirms that the function was defined\n 'create_world_returns' which confirms that calling the function\n produces the right result.\n 'create_world_complete' which confirms that the previous phase\n terminated in order to give some partial credit.\n \n Although the 'create_world_exists' phase is composed of one function, the\n 'create_world_returns' phase is actually composed of several functions that\n check the components of the function.\n \n @phase('create_world_exists')\n \n @phase('create_world_returns', after='create_world_exists')\n \nPhases are reset between sections.\n\n'''\n\n\nfrom pedal.report.imperative import MAIN_REPORT\nfrom pedal.assertions.setup import (_setup_assertions, AssertionException,\n _add_relationships, _add_phase)\nfrom functools import wraps\n\ndef contextualize_calls():\n pass\n\n\nclass _finish_section:\n def __init__(self, number, *functions):\n if isinstance(number, int):\n self.number = number\n else:\n self.number = -1\n functions = [number] + list(functions)\n self.functions = functions\n for function in functions:\n self(function, False)\n\n def __call__(self, f=None, quiet=True):\n if f is not None:\n f()\n if quiet:\n print(\"\\tNEXT SECTION\")\n\n def __enter__(self):\n pass\n\n def __exit__(self, x, y, z):\n print(\"\\tNEXT SECTION\")\n # return wrapped_f\n\n\ndef finish_section(number, *functions, next_section=False):\n if len(functions) == 0:\n x = _finish_section(number, *functions)\n x()\n else:\n result = _finish_section(number, *functions)\n if next_section:\n print(\"\\tNEXT SECTION\")\n return result\n\ndef section(*args):\n '''\n TODO: Deprecate?\n '''\n _setup_assertions(MAIN_REPORT)\n def wrap(f):\n _add_phase(phase_name, _handle_entry)\n MAIN_REPORT['assertions']['phases'].append((section_number, f))\n return f\n section_number = -1\n if len(args) >= 1 and callable(args[0]):\n if len(args) >= 2:\n section_number = args[1]\n return wrap(args[0])\n elif len(args) >= 1:\n section_number = args[0]\n return wrap\n\ndef phase(phase_name, before=None, after=None):\n '''\n \n Args:\n phase_name (str): The name of the phase this function will belong to.\n before (list[str] or str): the name(s) of any phases that this phase\n should be before.\n after (list[str] or str): the name(s) of any phases that this phase\n should be after.\n '''\n _setup_assertions(MAIN_REPORT)\n def wrap(f):\n @wraps(f)\n def _handle_entry(*args, **kwargs):\n old_exception_state = MAIN_REPORT['assertions']['exceptions']\n MAIN_REPORT['assertions']['exceptions'] = True\n value = f(*args, **kwargs)\n MAIN_REPORT['assertions']['exceptions'] = old_exception_state\n return value\n _add_phase(phase_name, _handle_entry)\n _add_relationships(phase_name, before)\n _add_relationships(after, phase_name)\n return _handle_entry\n return wrap\n \ndef stop_on_failure(f):\n _setup_assertions(MAIN_REPORT)\n @wraps(f)\n def wrapped(*args, **kwargs):\n old_exception_state = MAIN_REPORT['assertions']['exceptions']\n MAIN_REPORT['assertions']['exceptions'] = True\n value = None\n try:\n value = f(*args, **kwargs)\n except AssertionException:\n pass\n MAIN_REPORT['assertions']['exceptions'] = old_exception_state\n return value\n return 
wrapped\n\n\ndef try_all():\n _setup_assertions(MAIN_REPORT)\n @wraps(f)\n def wrapped(*args, **kwargs):\n old_exception_state = MAIN_REPORT['assertions']['exceptions']\n MAIN_REPORT['assertions']['exceptions'] = False\n value = f(*args, **kwargs)\n MAIN_REPORT['assertions']['exceptions'] = old_exception_state\n return value\n return wrapped\n\n\ndef precondition(function):\n pass\n\n\ndef postcondition(function):\n pass\n","src/lib/pedal/assertions/setup.py":"import sys\n\nfrom pedal.report.imperative import MAIN_REPORT\nfrom pedal.sandbox.exceptions import SandboxStudentCodeException\n\nclass AssertionException(Exception):\n def __str__(self):\n return self.args[0]\n\ndef _topological_sort(names, orderings):\n visited = set()\n stack = []\n \n def dfs(name):\n visited.add(name)\n if name in orderings:\n for neighbor in orderings[name]:\n if neighbor not in visited:\n dfs(neighbor)\n stack.insert(0, name)\n \n for name in names[::-1]:\n if name not in visited:\n dfs(name)\n return stack\n \n\ndef resolve_all(set_success=False, report=None):\n from pprint import pprint\n if report is None:\n report = MAIN_REPORT\n _setup_assertions(report)\n orderings = report['assertions']['relationships']\n phase_functions = report['assertions']['phase_functions']\n phase_names = report['assertions']['phases']\n phase_names = _topological_sort(phase_names, orderings)\n #pprint(orderings)\n phase_success = False\n for phase_name in phase_names:\n phase_success = True\n for function in phase_functions[phase_name]:\n try:\n phase_success = phase_success and (function() is not False)\n except AssertionException:\n phase_success = False\n except SandboxStudentCodeException:\n phase_success = False\n if not phase_success:\n break\n \n #for f in report.feedback:\n # print(\"\\t\", f, f.mistake, f.misconception)\n if not report['assertions']['failures'] and phase_success and set_success:\n report.set_success()\n \n _reset_phases(report)\n \ndef _add_phase(phase_name, function, report=None):\n if report is None:\n report = MAIN_REPORT\n phase_functions = report['assertions']['phase_functions']\n phases = report['assertions']['phases']\n if phase_name not in phase_functions:\n phase_functions[phase_name] = []\n phases.append(phase_name)\n phase_functions[phase_name].append(function)\n \ndef _add_relationships(befores, afters, report=None):\n if report is None:\n report = MAIN_REPORT\n relationships = report['assertions']['relationships']\n if None in (befores, afters):\n return\n if not isinstance(befores, (list, tuple)):\n befores = [befores]\n if not isinstance(afters, (list, tuple)):\n afters = [afters]\n for before in befores:\n if not isinstance(before, str):\n before = before.__name__\n if before not in relationships:\n relationships[before] = []\n for after in afters:\n if not isinstance(after, str):\n after = after.__name__\n relationships[before].append(after)\n \n \ndef _reset_phases(report=None):\n if report is None:\n report = MAIN_REPORT\n report['assertions']['relationships'].clear()\n report['assertions']['phases'].clear()\n report['assertions']['phase_functions'].clear()\n report['assertions']['failures'] = 0\n\n\ndef _setup_assertions(report):\n if 'assertions' not in report:\n report['assertions'] = {\n 'phases': [],\n 'phase_functions': {},\n 'relationships': {},\n 'exceptions': False,\n 'failures': 0,\n 'collected': [],\n # Should we batch up multiple assertion failures?\n # The grouping mechanism is try_all\n 'tabular_output': False,\n }\n report.add_hook('source.next_section.before', 
resolve_all)\n report.add_hook('pedal.resolvers.resolve', resolve_all)\n","src/lib/pedal/assertions/__init__.py":"from pedal.report.imperative import MAIN_REPORT\n\nfrom pedal.assertions.setup import _setup_assertions, resolve_all\nfrom pedal.assertions.assertions import *\nfrom pedal.assertions.organizers import *\n\ndef set_assertion_mode(exceptions=True, report=None):\n if report is None:\n report = MAIN_REPORT\n _setup_assertions(report)\n \n report['assertions']['exceptions'] = exceptions\n","src/lib/pedal/cait/ast_helpers.py":"\"\"\"\nA pretty-printing dump function for the ast module. The code was copied from\nthe ast.dump function and modified slightly to pretty-print.\n\nAlex Leone (acleone ~AT~ gmail.com), 2010-01-30\n\nFrom http://alexleone.blogspot.co.uk/2010/01/python-ast-pretty-printer.html\n\"\"\"\n\nfrom ast import AST, iter_fields, parse\n\n\ndef dump(node, annotate_fields=True, include_attributes=False, indent=' '):\n \"\"\"\n Return a formatted dump of the tree in *node*. This is mainly useful for\n debugging purposes. The returned string will show the names and the values\n for fields. This makes the code impossible to evaluate, so if evaluation is\n wanted *annotate_fields* must be set to False. Attributes such as line\n numbers and column offsets are not dumped by default. If this is wanted,\n *include_attributes* can be set to True.\n \"\"\"\n\n def _format(_node, level=0):\n if isinstance(_node, AST):\n fields = [(a, _format(b, level)) for a, b in iter_fields(_node)]\n if include_attributes and _node._attributes:\n fields.extend([(a, _format(getattr(_node, a), level))\n for a in _node._attributes])\n return ''.join([\n _node.__class__.__name__,\n '(',\n ', '.join(('%s=%s' % field for field in fields)\n if annotate_fields else\n (b for a, b in fields)),\n ')'])\n elif isinstance(_node, list):\n lines = ['[']\n lines.extend((indent * (level + 2) + _format(x, level + 2) + ','\n for x in _node))\n if len(lines) > 1:\n lines.append(indent * (level + 1) + ']')\n else:\n lines[-1] += ']'\n return '\\n'.join(lines)\n return repr(_node)\n\n if not isinstance(node, AST):\n raise TypeError('expected AST, got %r' % node.__class__.__name__)\n return _format(node)\n\n\ndef parseprint(code, filename=\"\", mode=\"exec\", **kwargs):\n \"\"\"Parse some code from a string and pretty-print it.\"\"\"\n node = parse(code, mode=mode) # An ode to the code\n print(dump(node, **kwargs))\n\n\n# Short name: pdp = parse, dump, print\npdp = parseprint\n","src/lib/pedal/cait/ast_map.py":"from pedal.cait.cait_node import CaitNode\n\n\nclass AstSymbol:\n \"\"\"\n This represents an Ast symbol, whether it be a variable (name node) or a function name\n for place holders used in instructor patterns\n\n Notes:\n Also has the attributes of the relevant Name node from the ast class.\n\n Attributes:\n id (str): the name of the variable place holder used by the instructor\n ast_node (cait_node): the ast node of the variable\n \"\"\"\n\n def __init__(self, _id=\"\", _node=None):\n self.id = _id\n self.astNode = _node\n self.ast_node = _node\n\n def __getattr__(self, attr):\n return getattr(self.astNode, attr)\n\n def __str__(self):\n # return ''.join([\"id = \", self.id.__str__(), \", astNode = \", type(self.astNode).__name__])\n return self.id\n\n def __repr__(self):\n return ''.join([\"id = \", self.id.__str__(), \", astNode = \", type(self.astNode).__name__])\n\n\nclass AstSymbolList:\n \"\"\"\n This class is a wrapper for a list of AstSymbols for ease of access\n If accessed as a list, manipulable as a 
list, otherwise, acts as the first AstSymbol in the list\n \"\"\"\n\n def __init__(self):\n self.my_list = []\n\n def __getitem__(self, item):\n return self.my_list.__getitem__(item)\n\n def append(self, item):\n self.my_list.append(item)\n\n def __getattr__(self, attr):\n return getattr(self.my_list[0], attr)\n\n def __len__(self):\n return self.my_list.__len__()\n\n\nclass AstMap:\n def __init__(self):\n self.mappings = {}\n self.symbol_table = {}\n self.exp_table = {}\n self.func_table = {}\n self.conflict_keys = []\n self.match_root = None\n self.diagnosis = \"\"\n\n def add_func_to_sym_table(self, ins_node, std_node):\n \"\"\"\n Adds ins_node.name to the symbol table if it doesn't already exist, mapping it to a set of ins_node. Updates a\n second dictionary that maps ins_node to an std_node, and overwrites the current std_node since there should only\n be one mapping.\n\n Args:\n ins_node: instructor node or str representing a function name\n std_node: student node representing function\n\n Returns:\n int: number of conflicts generated\n\n \"\"\"\n if not isinstance(std_node, CaitNode):\n raise TypeError\n if isinstance(ins_node, str):\n key = ins_node\n else:\n try:\n if ins_node.ast_name == \"FunctionDef\":\n key = ins_node.astNode.name\n else: # TODO: Little skulpt artifact that doesn't raise Attribute Errors...\n key = ins_node._id\n raise AttributeError\n except AttributeError:\n key = ins_node.astNode._id\n\n try:\n if std_node.ast_name == \"FunctionDef\":\n value = AstSymbol(std_node.astNode.name, std_node)\n else: # TODO: Little skulpt artifact that doesn't raise Attribute Errors...\n raise AttributeError\n# value = AstSymbol(std_node.astNode.name, std_node)\n except AttributeError:\n node = std_node\n if type(node.astNode).__name__ != \"Call\":\n node = node.parent\n node._id = std_node._id\n value = AstSymbol(std_node._id, node)\n if key in self.func_table:\n new_list = self.func_table[key]\n if value not in new_list:\n new_list.append(value)\n if not (key in self.conflict_keys):\n for other in new_list:\n if value.id != other.id:\n self.conflict_keys.append(key)\n break\n else:\n new_list = AstSymbolList()\n new_list.append(value)\n\n self.func_table[key] = new_list\n return len(self.conflict_keys)\n\n def add_var_to_sym_table(self, ins_node, std_node):\n \"\"\"\n Adds ins_node._id to the symbol table if it doesn't already exist, mapping it to a set of ins_node. 
Updates a\n second dictionary that maps ins_node to an std_node, and overwrites the current std_node since there should only\n be one mapping.\n\n Args:\n ins_node: instructor node or str representing variable\n std_node: student node representing variable\n\n Returns:\n int: number of conflicts generated\n\n \"\"\"\n if not isinstance(std_node, CaitNode):\n raise TypeError\n if isinstance(ins_node, str):\n key = ins_node\n else:\n key = ins_node.astNode._id\n value = AstSymbol(std_node.astNode._id, std_node)\n if key in self.symbol_table:\n new_list = self.symbol_table[key]\n new_list.append(value)\n if not (key in self.conflict_keys):\n for other in new_list:\n if value._id != other._id:\n self.conflict_keys.append(key)\n break\n else:\n new_list = AstSymbolList()\n new_list.append(value)\n\n self.symbol_table[key] = new_list\n return len(self.conflict_keys)\n\n def add_exp_to_sym_table(self, ins_node, std_node):\n \"\"\"\n Adds mapping of expression symbol to student node\n This function does NOT check for conflicts at the moment and probably should at some point.\n TODO: Check for conflicts\n Args:\n ins_node: Instructor node representing an expression\n std_node: student ast subtree corresponding to the symbol\n\n Returns:\n None\n \"\"\"\n if not isinstance(std_node, CaitNode):\n raise TypeError\n self.exp_table[ins_node.astNode.id] = std_node\n\n def add_node_pairing(self, ins_node, std_node):\n \"\"\"\n Adds a mapping of instructor ast node to a specific student ast node\n Args:\n ins_node: instructor pattern ast node\n std_node: student ast node\n\n Returns:\n None\n \"\"\"\n if not isinstance(std_node, CaitNode):\n raise TypeError\n self.mappings[ins_node] = std_node\n\n def has_conflicts(self):\n \"\"\"\n\n Returns:\n bool: True if number of conflicts is greater than 0\n \"\"\"\n return len(self.conflict_keys) > 0\n\n def new_merged_map(self, other):\n \"\"\"\n Returns a newly merged map consisting of this and other\n without modifying self.\n Args:\n other (AstMap): the other AstMap to be merged with\n\n Returns:\n AstMap: self modified by adding the contents of other\n \"\"\"\n new_map = AstMap()\n new_map.merge_map_with(self)\n new_map.merge_map_with(other)\n return new_map\n\n def merge_map_with(self, other):\n \"\"\"\n Returns a newly merged map consisting of this and other\n by modifying self\n Args:\n other (AstMap): the other AstMap to be merged with\n\n Returns:\n AstMap: self modified by adding the contents of other\n \"\"\"\n if other is None:\n return\n\n if not isinstance(other, type(self)):\n raise TypeError\n\n # merge all mappings\n self.mappings.update(other.mappings)\n\n # merge all expressions\n self.exp_table.update(other.exp_table)\n\n # merge all symbols\n for key, value in other.symbol_table.items():\n for sub_value in value:\n self.add_var_to_sym_table(key, sub_value.astNode)\n\n # merge all functions\n for key, value in other.func_table.items():\n for sub_value in value:\n self.add_func_to_sym_table(str(key), sub_value.astNode)\n\n @property\n def match_lineno(self):\n \"\"\"\n\n Returns:\n int: the line number this match started on\n \"\"\"\n values = [v.lineno for v in self.mappings.values()\n if v.lineno is not None]\n if not values:\n return -1\n else:\n return min(values)\n\n def __getitem__(self, id_n):\n if id_n.startswith('__'):\n expression = self.exp_table[id_n]\n expression.map = self\n return expression\n else:\n if id_n in self.symbol_table:\n return self.symbol_table[id_n]\n else:\n return self.func_table[id_n]\n\n def __contains__(self, 
id_n):\n if id_n.startswith('__'):\n return id_n in self.exp_table\n else:\n exists = id_n in self.symbol_table\n if exists:\n return exists\n else:\n return id_n in self.func_table\n","src/lib/pedal/cait/cait_api.py":"from pedal.report import MAIN_REPORT\nfrom pedal.cait.stretchy_tree_matching import StretchyTreeMatcher\nfrom pedal.cait.cait_node import CaitNode\nimport ast\n\n\nclass CaitException(Exception):\n pass\n\n\n\"\"\"\nCaitReport:\n A collection of information from the latest CAIT run.\n\n Attrs:\n ast: The CaitNode tree that was most recently parsed out.\n cache[str:CaitNode]: A dictionary mapping student code (str) to\n parsed representations.\n success: Whether there have been any errors so far.\n error: The exception that occurred, or None if no exception so far.\n\"\"\"\n\n\ndef _parse_source(code, cait_report):\n \"\"\"\n Parses the given code and returns its Cait representation. If the parse was\n unsuccessful, it attaches the error to the report.\n\n Args:\n code (str): A string of Python code.\n cait_report (dict): A Cait Report to store information in.\n Returns:\n AstNode: The parsed AST reprensetation, or None\n \"\"\"\n try:\n parsed = ast.parse(code)\n except SyntaxError as e:\n cait_report['success'] = False\n cait_report['error'] = e\n return ast.parse(\"\")\n return parsed\n\n\ndef _load_cait(student_code, report):\n \"\"\"\n Retrieves the current report for CAIT. If there is no CAIT report, it will\n generate one. If source code is given, that will be used instead of the\n report's source code.\n\n Args:\n student_code (str): The code to parse into the a CaitNode tree. If\n None, then it will use the code in the report's Source tool.\n report (Report): The report to attach data to.\n\n Returns:\n dict: Returns the Cait Report\n \"\"\"\n if 'cait' not in report:\n report['cait'] = {'success': True, 'error': None,\n 'ast': None, 'cache': {}}\n cait = report['cait']\n if student_code is not None:\n if student_code in cait['cache']:\n cait['ast'] = cait['cache'][student_code]\n return cait\n else:\n student_ast = _parse_source(student_code, cait)\n elif report['source']['success']:\n student_code = report['source']['code']\n if student_code in cait['cache']:\n cait['ast'] = cait['cache'][student_code]\n return cait\n else:\n student_ast = report['source']['ast']\n else:\n report.attach(\"No source code found\", tool='cait',\n category='analyzer')\n cait['success'] = False\n cait['ast'] = CaitNode(ast.parse(\"\"), report=report)\n return cait\n cait['ast'] = cait['cache'][student_code] = CaitNode(student_ast, report=report)\n return cait\n\n\ndef require_tifa(self):\n \"\"\"\n Confirms that TIFA was run successfully, otherwise raises a\n CaitException.\n \"\"\"\n if not self.report['tifa']['success']:\n raise CaitException(\"TIFA was not run prior to CAIT.\")\n\n\n# noinspection PyBroadException\ndef parse_program(student_code=None, report=None):\n \"\"\"\n Parses student code and produces a CAIT representation.\n\n Args:\n student_code (str): The student source code to parse. If None, defaults\n to the code within the Source tool of the given Report.\n report (Report): The report to attach data to. 
Defaults to MAIN_REPORT.\n\n Returns:\n CaitNode: A CAIT-enhanced representation of the root Node.\n \"\"\"\n if report is None:\n report = MAIN_REPORT\n cait_report = _load_cait(student_code, report)\n return cait_report['ast']\n\n\ndef expire_cait_cache(report=None):\n \"\"\"\n Deletes the most recent CAIT run and any cached CAIT parses.\n\n Args:\n report (Report): The report to attach data to. Defaults to MAIN_REPORT.\n \"\"\"\n if report is None:\n report = MAIN_REPORT\n report['cait']['ast'] = None\n report['cait']['cache'] = {}\n\n\ndef def_use_error(node, report=None):\n \"\"\"\n Checks if node is a name and has a def_use_error\n\n Args:\n node (str or AstNode or CaitNode): The Name node to look up.\n report (Report): The report to attach data to. Defaults to MAIN_REPORT.\n Returns:\n True if the given name has a def_use_error\n \"\"\"\n if report is None:\n report = MAIN_REPORT\n if not isinstance(node, str) and node.ast_name != \"Name\":\n raise TypeError\n try:\n def_use_vars = report['tifa']['issues']['Initialization Problem']\n except KeyError:\n return False\n if not isinstance(node, str):\n node_id = node.id\n else:\n node_id = node\n has_error = False\n for issue in def_use_vars:\n name = issue['name']\n if name == node_id:\n has_error = True\n break\n return has_error\n\n\n# noinspection PyBroadException\ndef data_state(node, report=None):\n \"\"\"\n Determines the Tifa State of the given node.\n\n Args:\n node (str or AstNode or CaitNode): The Name node to look up in TIFA.\n report (Report): The report to attach data to. Defaults to MAIN_REPORT.\n Returns:\n The State of the object (Tifa State) or None if it doesn't exist\n \"\"\"\n if report is None:\n report = MAIN_REPORT\n if not isinstance(node, str) and node.ast_name != \"Name\":\n raise TypeError\n if isinstance(node, str):\n node_id = node\n else:\n node_id = node.id\n try:\n return report['tifa'][\"top_level_variables\"][node_id]\n except KeyError:\n return None\n\n\ndef data_type(node, report=None):\n \"\"\"\n Looks up the type of the node using Tifa's analysis.\n\n Args:\n node (str or AstNode or CaitNode): The Name node to look up in TIFA.\n report (Report): The report to attach data to. 
Defaults to MAIN_REPORT.\n Returns:\n The type of the object (Tifa type) or None if a type doesn't exist\n \"\"\"\n state = data_state(node, report=report)\n if state is not None:\n return state.type\n return None\n\n\ndef find_match(pattern, student_code=None, report=None, cut=False):\n \"\"\"\n Apply Tree Inclusion and return the first match of the `pattern` in the\n `student_code`.\n\n Args:\n pattern (str): The CaitExpression to match against.\n student_code (str): The string of student code to check against.\n Defaults to the code of the Source tool in the Report.\n report (Report): The report to attach data to.\n cut (bool): Set to true to trim root to first branch\n Returns:\n CaitNode or None: The first matching node for the given pattern, or\n None if nothing was found.\n \"\"\"\n matches = find_matches(pattern=pattern, student_code=student_code,\n report=report, cut=cut)\n if matches:\n return matches[0]\n else:\n return None\n\n\ndef find_matches(pattern, student_code=None, report=None, cut=False):\n \"\"\"\n Apply Tree Inclusion and return all matches of the `pattern` in the\n `student_code`.\n\n Args:\n pattern (str): The CaitExpression to match against.\n student_code (str): The string of student code to check against.\n report (Report): The report to attach data to.\n cut (bool): Set to true to trim root to first branch\n Returns:\n List[CaitNode]: All matching nodes for the given pattern.\n \"\"\"\n if report is None:\n report = MAIN_REPORT\n cait_report = _load_cait(student_code, report)\n if not cait_report['success']:\n return []\n student_ast = cait_report['ast']\n matcher = StretchyTreeMatcher(pattern, report=report)\n return matcher.find_matches(student_ast)\n\n\ndef find_submatches(pattern, student_code, is_mod=False):\n \"\"\"\n Incomplete.\n \"\"\"\n return find_expr_sub_matches(pattern, student_code, is_mod)\n\n\ndef find_expr_sub_matches(pattern, student_code, is_mod=False, report=None):\n \"\"\"\n Finds pattern in student_code\n # TODO: Add code to make pattern accept CaitNodes\n # TODO: Make this function without so much meta knowledge\n Args:\n pattern: the expression to find (str that MUST evaluate to a Module node with a single child or an AstNode)\n student_code: student subtree\n is_mod (bool): currently hack for multiline sub matches\n report: defaults to MAIN_REPORT unless another one exists\n Returns:\n a list of matches or False if no matches found\n \"\"\"\n if report is None:\n report = MAIN_REPORT\n is_node = isinstance(pattern, CaitNode)\n if not isinstance(pattern, str) and not is_node:\n raise TypeError(\"pattern expected str or CaitNode, found {0}\".format(type(pattern)))\n matcher = StretchyTreeMatcher(pattern, report=report)\n if (not is_node and not is_mod) and len(matcher.root_node.children) != 1:\n raise ValueError(\"pattern does not evaluate to a singular statement\")\n return matcher.find_matches(student_code, check_meta=False)\n","src/lib/pedal/cait/cait_node.py":"import ast\nfrom pedal.cait.ast_helpers import dump\nfrom types import MethodType\nfrom pedal.report import MAIN_REPORT\n\n\nclass CaitNode:\n \"\"\"\n A wrapper class for AST nodes. 
Linearizes access to the children of the ast\n node and saves the field this AST node\n originated from.\n\n Attributes:\n ast_name (str): The name of the original AstNode (e.g., \"Name\" or\n \"FunctionDef\")\n\n TODO: May want to just add fields and methods to the existing AST nodes and\n use a production pattern instead.\n \"\"\"\n\n def __init__(self, ast_node, my_field='', tid=0, lin_tree=None,\n ancestor=None, report=None):\n \"\"\"\n\n Args:\n ast_node (ast_node): The AST node to be wrapped\n my_field (str): the field of the parent node that produced this child.\n tid (int): the tree id\n lin_tree list of cait_node: A linear version of the tree\n ancestor (cait_node): The parent of this node\n report: The report associated with this particular match.\n \"\"\"\n if report is None:\n report = MAIN_REPORT\n self.report = report\n self.children = []\n self.astNode = ast_node\n self.field = my_field\n self.tree_id = tid\n self.parent = ancestor\n if lin_tree is None:\n self.linear_tree = [self]\n else:\n lin_tree.append(self)\n self.linear_tree = lin_tree\n\n # reference to the easy node wrapping the ast_node\n setattr(ast_node, 'cait_node', self)\n\n tid_count = tid\n\n my_field_generator = ast.iter_fields(self.astNode)\n for item in my_field_generator:\n field, value = item\n # if the field doesn't have a value, no child exists\n if value is None:\n continue\n\n # If the children are not in an array, wrap it in an array for\n # consistency in the code the follows\n if not isinstance(value, list):\n value = [value]\n\n # Reference ast_node_visitor.js for the original behavior and keep note of it for the purposes of handling\n # the children noting the special case when the nodes of the array are actually parameters of the node\n # (e.g. a load function) instead of a child node\n for sub_value in value:\n if isinstance(sub_value, ast.AST):\n new_child = CaitNode(sub_value, my_field=field,\n tid=tid_count + 1,\n lin_tree=self.linear_tree,\n ancestor=self,\n report=self.report)\n self.children.append(new_child)\n tid_count = len(self.linear_tree) - 1\n\n def __str__(self):\n return ''.join([self.field, \"\\n\", dump(self.astNode)])\n\n def numeric_logic_check(self, mag, expr):\n \"\"\"\n If this node is a Compare or BoolOp node, sees if the logic in expr (a javascript string being a logical\n statement) matches the logic of self. 
This assumes that we are only comparing numerical values to a single\n variable\n TODO: modify this to take multiple variables\n TODO: modify to support more than +, -, *, and / BinOps\n TODO: modify to support unary operators other than USub and Not\n TODO: This is very finicky and buggy, try not to use it\n Args:\n mag (float): the order of magnitude that should be added to numbers to check logic, 1 is usually a good value,\n especially when working with the set of integers.\n expr (Compare or BoolOp): the \"Compare\" or \"BoolOp\" tree to check self against\n\n Returns:\n bool: True if self (typically student node) and expr are equivalent boolean expressions\n \"\"\"\n\n def eval_unop(unop_num, unop_node):\n operand = eval_selector(unop_num, unop_node.operand)\n op = unop_node.op_name\n\n return {\"USub\": -operand,\n \"Not\": not operand}[op]\n\n def eval_binop(binop_num, binop_node):\n left = eval_selector(binop_num, binop_node.left)\n right = eval_selector(binop_num, binop_node.right)\n op = binop_node.op_name\n\n return {\n \"Add\": left + right,\n \"Sub\": left - right,\n \"Mult\": left * right,\n \"Div\": left / right}[op]\n\n def eval_selector(op_num, op_expr):\n op_expr = op_num if op_expr.ast_name == \"Name\" else op_expr\n if isinstance(op_expr, (int, float)):\n return op_expr\n if op_expr.ast_name == \"BinOp\":\n return eval_binop(op_num, op_expr)\n if op_expr.ast_name == \"UnaryOp\":\n return eval_unop(op_num, op_expr)\n if op_expr.ast_name == \"Num\":\n return op_expr.n\n raise NotImplementedError\n\n def eval_bool_comp(num_list, comp_ast):\n ops = comp_ast.ops_names\n comps = comp_ast.comparators\n results = []\n current = comp_ast.left\n left = current\n\n for num_i in num_list:\n result = True\n for op, comp in zip(ops, comps):\n current = eval_selector(num_i, current)\n comp_p = eval_selector(num_i, comp)\n\n res = {\n \"Eq\": current == comp_p,\n \"NotEq\": current != comp_p,\n \"Lt\": current < comp_p,\n \"LtE\": current <= comp_p,\n \"Gt\": current > comp_p,\n \"GtE\": current >= comp_p,\n }[op]\n current = comp\n result = result and res\n if not result:\n break\n results.append(result)\n current = left\n return results\n\n def eval_boolop(num_list, boolop_ast):\n boolop = boolop_ast.op_name\n values = boolop_ast.values\n results_c = None\n is_and = boolop == \"And\"\n for value in values:\n if value.ast_name == \"Compare\":\n results = eval_bool_comp(num_list, value)\n else: # should be boolop\n results = eval_boolop(num_list, value)\n if results_c is None:\n results_c = results\n else: # compile results\n new_result = []\n for result1, result2 in zip(results_c, results):\n if is_and:\n new_result.append(result1 and result2)\n else:\n new_result.append(result1 or result2)\n results_c = new_result\n return results_c\n\n try:\n ins_expr = CaitNode(ast.parse(expr), report=self.report).body[0].value\n ins_nums = ins_expr.find_all(\"Num\")\n std_nums = self.find_all(\"Num\")\n test_nums = []\n for num in ins_nums:\n raw_num = num.n\n test_nums.append(raw_num)\n test_nums.append(raw_num + mag)\n test_nums.append(raw_num - mag)\n for num in std_nums:\n raw_num = num.n\n test_nums.append(raw_num)\n test_nums.append(raw_num + mag)\n test_nums.append(raw_num - mag)\n\n if self.ast_name == \"Compare\":\n std_res = eval_bool_comp(test_nums, self)\n elif self.ast_name == \"BoolOp\":\n std_res = eval_boolop(test_nums, self)\n else:\n return False\n\n if ins_expr.ast_name == \"Compare\":\n ins_res = eval_bool_comp(test_nums, ins_expr)\n elif ins_expr.ast_name == \"BoolOp\":\n 
ins_res = eval_boolop(test_nums, ins_expr)\n else:\n raise TypeError\n return ins_res == std_res\n except Exception:\n return False\n\n def get_next_tree(self):\n \"\"\"Gets the next tree in the AST\n This method gets the next AST node that is of equal or higher level than self. Returns None if the end of the\n tree is reached\n TODO: Create a get sibling method.\n\n Returns:\n cait_node: The next tree in the AST\n\n \"\"\"\n\n # adding function to track tree ids\n def visit_counter(self, node):\n self.counter += 1\n self.generic_visit(node)\n\n node_counter = ast.NodeVisitor()\n setattr(node_counter, 'counter', self.tree_id)\n node_counter.visit = MethodType(visit_counter, node_counter)\n\n # getting ids\n node_counter.visit(self.astNode)\n out_of_tree = node_counter.counter >= len(self.linear_tree) # check if out of bounds\n # len(self.children) > 0 and self.children[-1] == node_counter\n if out_of_tree:\n return None\n return self.linear_tree[node_counter.counter]\n\n def get_child(self, node):\n \"\"\"\n\n Args:\n node: a non-CaitNode ast node\n\n Returns:\n cait_node: the corresponding cait_node to the child\n \"\"\"\n if isinstance(node, ast.AST):\n for child in self.children:\n if child.astNode == node:\n return child\n elif isinstance(node, int):\n return self.children(node)\n return None\n\n @staticmethod\n def get_ast_name(node):\n return type(node).__name__\n\n def get_clashing_attr(self, key):\n if key == \"value\":\n return self.get_value()\n\n def __getattr__(self, item):\n key = item\n \"\"\"\n Non-ast node attributes based on ast_node attributes\n \"\"\"\n node_name = CaitNode.get_ast_name(self.astNode)\n if node_name == \"Assign\" and key == \"target\":\n key = \"targets\"\n if item in AST_SINGLE_FUNCTIONS:\n key = item[:-5] # strip suffix '_name'\n if item in AST_ARRAYS_OF_FUNCTIONS:\n key = item[:-6] # strip suffix '_names'\n\n \"\"\"\n Non-ast node attributes\n \"\"\"\n if key == 'next_tree':\n return self.get_next_tree()\n if key == 'ast_name':\n return node_name\n elif key == '_name':\n return self.astNode.name\n elif key == 'ast_node':\n return self.astNode\n else: # ast node attributes or derivative attributes\n if hasattr(self.astNode, key):\n # noinspection PyBroadException\n try:\n field = self.astNode.__getattribute__(key)\n except Exception:\n field = None\n if node_name == \"Assign\" and item != key:\n if item == \"target\":\n return field[0].cait_node # Get's the relevant ast node\n elif item == \"targets\" and isinstance(field, list):\n easy_array = []\n for node in field:\n easy_array.append(node.cait_node)\n return easy_array\n else:\n return field\n elif item in AST_SINGLE_FUNCTIONS:\n return type(field).__name__\n elif item in AST_ARRAYS_OF_FUNCTIONS:\n str_ops_list = []\n for op in field:\n str_ops_list.append(type(op).__name__)\n return str_ops_list\n elif isinstance(field, ast.AST):\n return field.cait_node\n elif isinstance(field, list):\n try:\n return [f.cait_node for f in field]\n except AttributeError:\n # This can only happen in NonLocals, which has a list\n # of raw strings in the `names` property\n return field\n else:\n return field\n else: # get added field that may have existed for different node types\n return self.get_clashing_attr(key)\n\n def find_matches(self, pattern, is_mod=False, check_meta=True, use_previous=True):\n \"\"\"\n Retrieves any patterns that match against this CaitNode. 
Expected to be\n used for subpattern matching.\n \"\"\"\n # Avoid circular import\n import pedal.cait.stretchy_tree_matching as stm\n is_node = isinstance(pattern, CaitNode)\n if not isinstance(pattern, str) and not is_node:\n raise TypeError(\"pattern expected str or CaitNode, found {0}\".format(type(pattern)))\n matcher = stm.StretchyTreeMatcher(pattern, report=self.report)\n if (not is_node and not is_mod) and len(matcher.root_node.children) != 1:\n raise ValueError(\"pattern does not evaluate to a singular statement\")\n prev_match = self.map if use_previous else None\n return matcher.find_matches(self, check_meta=check_meta, pre_match=prev_match)\n\n def find_match(self, pattern, is_mod=False):\n matches = self.find_matches(pattern, is_mod)\n if len(matches) != 0:\n return matches[0]\n return None\n\n def find_all(self, node_type):\n \"\"\"Finds all nodes defined by string node_type\n\n Args:\n node_type: the string representing the \"type\" of node to look for\n\n Returns:\n a list of Ast Nodes (cait_nodes) of self that are of the specified type (including self if self\n meets that criteria)\n \"\"\"\n items = []\n visitor = ast.NodeVisitor()\n # setattr(visitor, \"current_id\", self.tree_id - 1)\n setattr(visitor, \"items\", items)\n func_name = 'visit_' + node_type\n\n def main_visit(self, node):\n self.items.append(node.cait_node)\n return self.generic_visit(node)\n\n func_ref = main_visit\n setattr(visitor, func_name, MethodType(func_ref, visitor))\n visitor.visit(self.astNode)\n return visitor.items\n\n def has(self, node):\n \"\"\"\n Determine if this node has the given `node`.\n \"\"\"\n if isinstance(node, (int, float)):\n visitor = ast.NodeVisitor()\n has_num = []\n\n def visit_Num(self, potential):\n has_num.append(node == potential.n)\n return self.generic_visit(potential)\n\n visitor.visit_Num = MethodType(visit_Num, visitor)\n visitor.visit(self.astNode)\n return any(has_num)\n elif node.ast_name != \"Name\":\n return False\n visitor = ast.NodeVisitor()\n has_name = []\n\n def visit_Name(self, potential):\n has_name.append(node.id == potential.id)\n return self.generic_visit(potential)\n\n visitor.visit_Name = MethodType(visit_Name, visitor)\n visitor.visit(self.astNode)\n return any(has_name)\n\n def is_before(self, other):\n \"\"\"\n Uses tree id to check if self node came before other.\n Args:\n other (cait_node): the other node to compare to\n\n Returns:\n bool: True if self is before other\n \"\"\"\n try:\n return self.tree_id < other.tree_id and self.linear_tree == other.linear_tree\n except Exception:\n raise TypeError\n\n def is_ast(self, ast_name):\n \"\"\"\n Checks self is the type of the specified ast node\n Args:\n ast_name (str): The name of the ast node type\n\n Returns:\n bool: True if this node's ast name matches the specified one\n \"\"\"\n if not isinstance(ast_name, str):\n ast_name = CaitNode.get_ast_name(ast_name.astNode)\n return CaitNode.get_ast_name(self.astNode).lower() == ast_name.lower()\n\n def is_method(self):\n \"\"\"\n Checks if self is a method\n\n Returns:\n bool: True if I'm a FunctionDef, and if any of my parents are ClassDef.\n \"\"\"\n # Check if I'm a FunctionDef, and if any of my parents are ClassDef.\n if self.ast_name != \"FunctionDef\":\n return False\n current = self.parent\n while current is not None:\n if current.ast_name == \"ClassDef\":\n return True\n # Don't treat closures as methods\n elif current.ast_name == \"FunctionDef\":\n return False\n current = current.parent\n return False\n\n def get_data_state(self):\n \"\"\"\n 
Gets the data_state object of self\n\n Returns:\n data_state or None: returns data_state if self is a name and exists, otherwise None\n \"\"\"\n if self.ast_name != \"Name\":\n return None\n try:\n return self.report['tifa'][\"top_level_variables\"][self.id]\n except KeyError:\n return None\n\n def get_data_type(self):\n \"\"\"\n\n Returns:\n type of the variable associated with this node if it's a name node, otherwise None.\n \"\"\"\n state = self.get_data_state()\n if state is None:\n return None\n else:\n return state.type\n\n def was_type(self, tp):\n \"\"\"\n\n Returns:\n type of the variable associated with this node if it's a name node, otherwise None.\n \"\"\"\n state = self.get_data_state()\n if state is None:\n return None\n else:\n return state.was_type(tp)\n\n def get_value(self):\n \"\"\"\"\n Returns:\n Value of node if Num or Str, and get_data_state if Name\n \"\"\"\n value = None\n if self.is_ast(\"Num\"):\n value = self.n\n elif self.is_ast(\"Str\"):\n value = self.s\n elif self.is_ast(\"Name\"):\n # TODO: Decide on what this should return...\n value = self.id\n return value\n\n\nAST_SINGLE_FUNCTIONS = [\"ctx_name\", \"op_name\"]\nAST_ARRAYS_OF_FUNCTIONS = [\"ops_names\"]\n","src/lib/pedal/cait/stretchy_tree_matching.py":"import ast\nimport re\nfrom pedal.cait.ast_map import AstMap\nfrom pedal.cait.cait_node import CaitNode\n\n# \"Enums\" for _name_regex\n_VAR = \"var\"\n_EXP = \"exp\"\n_WILD = \"wild\"\n_NONE_FIELD = \"none\"\n\n\ndef is_primitive(item):\n \"\"\"\n Determines if the given item is a primitive value (either an int, float,\n str, bool, or None).\n\n Args:\n item (any): Any value\n Returns:\n bool: Whether the item is a primitive value.\n \"\"\"\n return isinstance(item, (int, float, str, bool)) or item is None\n\n\ndef _name_regex(name_id):\n var_match = re.compile('^_[^_].*_$') # /regex\n exp_match = re.compile('^__.*__$') # /regex\n wild_card = re.compile('^___$') # /regex\n return {_VAR: var_match.match(name_id),\n _EXP: exp_match.match(name_id),\n _WILD: wild_card.match(name_id)}\n\n\nclass StretchyTreeMatcher:\n def __init__(self, ast_or_code, report, filename=\"__main__\"):\n \"\"\"\n The StretchyTreeMatcher is used to compare a pattern against some\n student code. It produces a set of potential mappings between them.\n\n Args:\n ast_or_code (str or AstNode): The students' code or a valid AstNode from\n `ast.parse`. If the code has invalid syntax, a SyntaxError\n will be raised.\n filename (str): The filename to parse with - only used for error\n reporting.\n report (Report): A report to obtain data from.\n \"\"\"\n self.report = report\n if isinstance(ast_or_code, str):\n ast_node = ast.parse(ast_or_code, filename)\n else:\n ast_node = ast_or_code\n # Build up root\n if ast_node is None:\n self.root_node = None\n elif isinstance(ast_node, CaitNode):\n self.root_node = ast_node\n else:\n self.root_node = CaitNode(ast_node, _NONE_FIELD, report=self.report)\n\n def find_matches(self, ast_or_code, filename=\"__main__\", check_meta=True, pre_match=None):\n \"\"\"\n Args:\n ast_or_code (str or AstNode): The students' code or a valid AstNode from\n `ast.parse`. 
If the code has invalid syntax, a SyntaxError\n will be raised.\n filename (str): The filename to parse with - only used for error\n reporting.\n check_meta (bool): Determine if the nodes came from the same AST\n field.\n Returns:\n list[AstMap]: A list of AstMaps that are suitable matches.\n \"\"\"\n if isinstance(ast_or_code, str):\n other_tree = CaitNode(ast.parse(ast_or_code, filename), report=self.report)\n elif isinstance(ast_or_code, CaitNode):\n other_tree = ast_or_code\n else:\n other_tree = CaitNode(ast_or_code, _NONE_FIELD, report=self.report)\n explore_root = self.root_node\n trim_set = [\"Expr\", \"Module\"]\n explore_root_old_field = explore_root.field\n if self.root_node is not None: # Trimming ins_node\n while (len(explore_root.children) == 1 and\n explore_root.ast_name in trim_set):\n explore_root.field = explore_root_old_field\n explore_root = explore_root.children[0]\n explore_root_old_field = explore_root.field\n explore_root.field = _NONE_FIELD\n other_root = other_tree\n other_root_old_field = other_root.field\n if other_root is not None: # Trimming std_node\n while len(other_root.children) == 1 and other_root.ast_name in trim_set:\n other_root.field = other_root_old_field\n other_root = other_root.children[0]\n other_root_old_field = other_root.field\n other_root.field = _NONE_FIELD\n matches = self.any_node_match(explore_root, other_root,\n check_meta=check_meta, pre_match=pre_match)\n explore_root.field = explore_root_old_field\n other_root.field = other_root_old_field\n return matches\n\n def any_node_match(self, ins_node, std_node, check_meta=True, cut=False, pre_match=None):\n \"\"\"\n Finds whether ins_node can be matched to some node in the tree std_node\n\n Args:\n ins_node:\n std_node:\n check_meta:\n cut:\n\n Returns:\n list of AstMaps: a mapping of nodes and a symbol table mapping ins_node to\n some node in the tree std_node or False if such a matching does not\n exist\n \"\"\"\n # @TODO: create a more public function that converts ins_node and std_node into CaitNodes\n # TODO: Create exhaustive any_node_match\n # matching: an object representing the mapping and the symbol table\n matching = self.deep_find_match(ins_node, std_node, check_meta, pre_match=pre_match)\n # if a direct matching is found\n if matching:\n for match in matching:\n match.match_root = std_node\n else:\n matching = []\n # return matching # return it\n # if not matching or exhaust: # otherwise\n # try to matching ins_node to each child of std_node, recursively\n for std_child in std_node.children:\n matching_c = self.any_node_match(ins_node, std_child, check_meta=check_meta, cut=cut, pre_match=pre_match)\n if matching_c:\n for match in matching_c:\n match.match_root = std_child\n # return matching\n matching = matching + matching_c\n if len(matching) > 0:\n return matching\n return []\n\n def deep_find_match(self, ins_node, std_node, check_meta=True,\n pre_match=None):\n \"\"\"\n Finds whether ins_node and matches std_node and whether ins_node's children flexibly match std_node's children\n in order\n Args:\n ins_node: The instructor ast that should be included in the student AST\n std_node: The student AST that we are searching for the included tree\n check_meta: Flag, if True, check whether the two nodes originated from the same ast field\n pre_match: If this was part of a previous match...\n\n Returns:\n a mapping of nodes and a symbol table mapping ins_node to std_node, or [] if no mapping was found\n \"\"\"\n method_name = \"deep_find_match_\" + type(ins_node.astNode).__name__\n 
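# Dispatch by node type: if a specialized matcher is defined for this instructor node (e.g. deep_find_match_Name or deep_find_match_BinOp), use it; otherwise fall back to deep_find_match_generic.\n        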
target_func = getattr(self, method_name, self.deep_find_match_generic)\n return target_func(ins_node, std_node, check_meta, pre_match=pre_match)\n\n # noinspection PyPep8Naming\n def deep_find_match_Name(self, ins_node, std_node, check_meta=True, pre_match=None):\n name_id = ins_node.astNode.id\n match = _name_regex(name_id)\n mapping = AstMap()\n matched = False\n meta_matched = self.metas_match(ins_node, std_node, check_meta)\n if match[_VAR] and meta_matched: # if variable\n if type(std_node.astNode).__name__ == \"Name\":\n return self.deep_find_match_generic(ins_node, std_node,\n check_meta=check_meta, ignores=[\"ctx\"],pre_match=pre_match)\n # could else return False, but shallow_match_generic should do this as well\n elif match[_EXP]: # and meta_matched: # if expression\n # terminate recursion, the whole subtree should match since expression nodes match to anything\n mapping.merge_map_with(pre_match)\n mapping.add_exp_to_sym_table(ins_node, std_node)\n matched = True\n elif match[_WILD] and meta_matched: # if wild card, don't care\n # terminate the recursion, the whole subtree should match since wild cards match to anything\n matched = True\n\n if matched:\n mapping.add_node_pairing(ins_node, std_node)\n return [mapping]\n # else\n return self.deep_find_match_generic(ins_node, std_node,\n check_meta=check_meta, ignores=[\"ctx\"], pre_match=pre_match)\n\n # noinspection PyPep8Naming\n def deep_find_match_BinOp(self, ins_node, std_node, check_meta=True, pre_match=None):\n op = ins_node.astNode.op\n op = type(op).__name__\n is_generic = not (op == \"Mult\" or op == \"Add\")\n if is_generic:\n return self.deep_find_match_generic(ins_node, std_node, check_meta, pre_match=pre_match)\n else: # this means that the node is clearly commutative\n return self.deep_find_match_binflex(ins_node, std_node, False, pre_match=pre_match)\n\n # noinspection PyMethodMayBeStatic\n def binflex_helper(self, case_left, case_right, new_mappings, base_mappings, pre_match=None):\n \"\"\"\n adds to new_mappings (return/modify by argument) the mappings for both the left and right subtrees as denoted by\n case_left and case_right\n Args:\n case_left: The mappings for the left opperand\n case_right: The mappings for the right opperand\n new_mappings: The new set of mappings to generate\n base_mappings: The original mappings of the binop node\n pre_match: A mapping passed down from an initial match\n Returns:\n None\n \"\"\"\n if case_left and case_right:\n for case_l in case_left:\n new_map = base_mappings[0].new_merged_map(case_l).new_merged_map(pre_match)\n for case_r in case_right:\n both = new_map.new_merged_map(case_r)\n if not both.has_conflicts():\n new_mappings.append(both)\n\n def deep_find_match_binflex(self, ins_node, std_node, check_meta=False, pre_match=None):\n base_mappings = self.shallow_match(ins_node, std_node, check_meta)\n if not base_mappings:\n return []\n op_mappings = self.shallow_match(ins_node.children[1], std_node.children[1], check_meta=True)\n if not op_mappings:\n return []\n base_mappings = [base_mappings[0].new_merged_map(op_mappings[0])]\n\n if base_mappings:\n ins_left = ins_node.children[0] # instructor left ast node\n ins_right = ins_node.children[2] # instructor right ast node\n std_left = std_node.children[0] # student left ast node\n std_right = std_node.children[2] # student right ast node\n new_mappings = []\n # case 1: ins_left->std_left and ins_right->std_right\n case_left = self.deep_find_match(ins_left, std_left, False)\n case_right = self.deep_find_match(ins_right, 
std_right, False)\n self.binflex_helper(case_left, case_right, new_mappings, base_mappings, pre_match=pre_match)\n # case 2: ins_left->std_right and ins_right->std_left\n case_left = self.deep_find_match(ins_left, std_right, False)\n case_right = self.deep_find_match(ins_right, std_left, False)\n self.binflex_helper(case_left, case_right, new_mappings, base_mappings, pre_match=pre_match)\n if len(new_mappings) == 0:\n return []\n return new_mappings\n return []\n\n def deep_find_match_Expr(self, ins_node, std_node, check_meta=True, pre_match=None):\n \"\"\"\n An Expression node (not to be confused with expressions denoted by the instructor nodes in Name ast nodes)\n checks whether it should be generic, or not\n Args:\n ins_node: Instructor ast to find in the student ast\n std_node: Student AST to search for the instructor ast in\n check_meta: flag to check whether the fields of the instructor node and the student node should match\n pre_match: An AstMap from a previous matching run\n\n Returns:\n AstMap: a mapping between the instructor and student asts, or False if such a mapping doesn't exist\n \"\"\"\n # if check_meta and ins_node.field != std_node.field:\n if not self.metas_match(ins_node, std_node, check_meta):\n return []\n mapping = AstMap() if pre_match is None else pre_match\n value = ins_node.value\n ast_type = type(value.astNode).__name__\n if ast_type == \"Name\":\n name_id = value.astNode.id\n exp_match = re.compile('^__.*__$') # /regex\n wild_card = re.compile('^___$') # /regex\n matched = False\n meta_matched = self.metas_match(ins_node, std_node, check_meta)\n if exp_match.match(name_id): # and meta_matched: # if expression\n # terminate recursion, the whole subtree should match since expression nodes match to anything\n mapping.add_exp_to_sym_table(value, std_node)\n matched = True\n elif wild_card.match(name_id) and meta_matched: # if wild card, don't care\n # terminate the recursion, the whole subtree should match since wild cards match to anything\n matched = True\n if matched:\n mapping.add_node_pairing(ins_node, std_node)\n return [mapping]\n return self.deep_find_match_generic(ins_node, std_node, check_meta)\n\n def deep_find_match_generic(self, ins_node, std_node, check_meta=True, ignores=None, pre_match=None):\n \"\"\"\n This first uses shallow match to find a base map (match) from which to\n build off. The algorithm then tracks all the possible mappings that\n match a given child node in the instructor AST, keeping track of which\n siblings have been visited.\n\n For each instructor child, when all children of the student node have\n been iterated through recursively, a helper function is called. 
This\n helper function determines which possible children validly can extend\n the base match to create a set of new base maps through use of the\n indicies of the sibilings.\n\n The process repeats itself until no matches can be grown or until each\n instructor child node has been visited\n\n Args:\n ins_node: Instructor ast to find in the student ast\n std_node: Student AST to search for the instructor ast in\n check_meta: flag to check whether the fields of the instructor node and the student node should match\n ignores: List of fields to ignore in the field match\n pre_match: a map from a previous match\n\n Returns:\n a mapping between the isntructor and student asts, or [] if such a mapping doesn't exist\n \"\"\"\n if ignores is None:\n ignores = []\n base_mappings = self.shallow_match(ins_node, std_node, check_meta)\n if base_mappings:\n for mapping in base_mappings:\n mapping.merge_map_with(pre_match)\n # base case this runs 0 times because no children\n # find each child of ins_node that matches IN ORDER\n base_sibs = [-1]\n youngest_sib = 0\n # for each child\n for i, insChild in enumerate(ins_node.children):\n # make a new set of maps\n running_maps = []\n running_sibs = []\n if insChild.field in ignores:\n continue\n # accumulate all potential matches for current child\n for j, std_child in enumerate(std_node.children[youngest_sib:], youngest_sib):\n std_child = std_node.children[j]\n new_mapping = self.deep_find_match(insChild, std_child, check_meta)\n if new_mapping:\n running_maps.append(new_mapping)\n running_sibs.append(j)\n map_update = self.map_merge(base_mappings, base_sibs, running_maps, running_sibs)\n if map_update is None:\n return []\n base_mappings = map_update['new_maps']\n base_sibs = map_update['new_sibs']\n youngest_sib = map_update['youngest_sib'] + 1\n return base_mappings\n return []\n\n # noinspection PyMethodMayBeStatic\n def map_merge(self, base_maps, base_sibs, run_maps, run_sibs):\n \"\"\"\n Merges base_maps with the current possible maps. Helper method to deep_find_match_generic. 
checks whether each\n mapping in run_maps can extend the match to any possible mapping in base_maps.\n\n Args:\n base_maps: The original mappings\n base_sibs: The corresponding siblings for each mapping in base_maps\n run_maps: The set of maps to merge into the current base_maps\n run_sibs: The corresponding siblings for each mapping in run_maps\n\n Returns:\n A new set of maps for all valid extensions of base_maps with running maps\n \"\"\"\n # no matching nodes were found\n if len(run_maps) == 0:\n return None\n new_maps = []\n new_sibs = []\n youngest_sib = run_sibs[0]\n for baseMap, base_sib in zip(base_maps, base_sibs):\n for run_map, runSib in zip(run_maps, run_sibs):\n if runSib > base_sib:\n for run_mapsub in run_map:\n new_map = baseMap.new_merged_map(run_mapsub)\n if not new_map.has_conflicts(): # if it's a valid mapping\n new_maps.append(new_map)\n new_sibs.append(runSib)\n if len(new_maps) == 0:\n return None\n return {\n 'new_maps': new_maps,\n 'new_sibs': new_sibs,\n 'youngest_sib': youngest_sib\n }\n\n # noinspection PyMethodMayBeStatic,PyPep8Naming,PyUnusedLocal\n def shallow_match_Module(self, ins_node, std_node, check_meta=True):\n \"\"\"\n Flexibly matches a module node to a module or a body\n Args:\n ins_node:\n std_node:\n check_meta:\n\n Returns:\n a mapping of ins_node to std_node, or False if doesn't match\n \"\"\"\n if type(std_node.astNode).__name__ == \"Module\" or std_node.field == \"body\":\n mapping = AstMap()\n mapping.add_node_pairing(ins_node, std_node)\n return [mapping]\n return []\n\n def shallow_symbol_handler(self, ins_node, std_node, id_val, check_meta=True):\n \"\"\"\n TODO: Make this handle the func field to handle functions\n Matches ins_node to std_node for different cases of encountering a name node in ins_node\n case 1: _var_ matches if std_node is a name node and automatically returns a mapping and symbol table\n case 2: __exp__ matches to any subtree and automatically returns a mapping and symbol table\n case 3: ___ matches to any subtree and automatically returns a mapping\n case 4: matches only if the exact names are the same (falls through to shallow_match_generic)\n Args:\n ins_node:\n std_node:\n id_val:\n check_meta:\n\n Returns:\n list of AstMap: a mapping of ins_node to std_node and possibly a symbol_table, or False if it doesn't match\n \"\"\"\n name_id = ins_node.astNode.__getattribute__(id_val)\n match = _name_regex(name_id)\n mapping = AstMap()\n matched = False\n # TODO: add functionality to add function references to func_table?\n meta_matched = self.metas_match(ins_node, std_node, check_meta)\n if match[_VAR] and meta_matched: # variable\n if type(std_node.astNode).__name__ == \"Name\" or id_val in [\"attr\", \"arg\"]:\n if id_val in [\"attr\", \"arg\"]:\n std_node.astNode._id = std_node.astNode.__getattribute__(id_val)\n if std_node.field == \"func\" and ins_node.field != _NONE_FIELD:\n # TODO: This 'ins_node.field != _NONE_FIELD' code is for an obscure edge case where the\n # instructor code is only _var_\n std_node.astNode._id = std_node.astNode.__getattribute__(id_val)\n mapping.add_func_to_sym_table(ins_node, std_node)\n else:\n std_node.astNode._id = std_node.astNode.__getattribute__(id_val)\n mapping.add_var_to_sym_table(ins_node, std_node) # TODO: Capture result?\n matched = True\n # could else return False, but shallow_match_generic should do this as well\n elif match[_EXP] and meta_matched:\n mapping.add_exp_to_sym_table(ins_node, std_node)\n matched = True\n elif match[_WILD] and meta_matched:\n matched = True\n\n if 
matched:\n mapping.add_node_pairing(ins_node, std_node)\n return [mapping]\n # else\n return self.shallow_match_main(ins_node, std_node, check_meta=check_meta, ignores=[\"ctx\"])\n\n # noinspection PyPep8Naming,PyMethodMayBeStatic\n def shallow_match_arg(self, ins_node, std_node, check_meta=True):\n ins_node.astNode._id = ins_node.arg\n # TODO: annotations are currently ignored because shallow_symbol_handler doesn't handle them, feature? or\n # should we fix this. Although this should actually be toggleable?\n return self.shallow_symbol_handler(ins_node, std_node, \"arg\", check_meta=check_meta)\n\n def shallow_match_arguments(self, ins_node, std_node, check_meta=True):\n # TODO: do we ignore default values? Currently not ignored\n return self.shallow_match_generic(ins_node, std_node, check_meta=check_meta)\n\n # noinspection PyPep8Naming,PyMethodMayBeStatic\n def shallow_func_handle(self, ins_node, std_node, check_meta=True):\n if ins_node.field == \"func\" and std_node.field == \"func\":\n ins_node.astNode._id = ins_node.astNode.attr\n return self.shallow_symbol_handler(ins_node, std_node, \"attr\", check_meta)\n return self.shallow_match_generic(ins_node, std_node, check_meta)\n\n def shallow_match_Attribute(self, ins_node, std_node, check_meta=True):\n if ins_node.field == \"func\" and std_node.ast_name == \"Attribute\":\n return self.shallow_func_handle(ins_node, std_node, check_meta)\n elif std_node.ast_name == \"Attribute\":\n ins_node.astNode._id = ins_node.attr # TODO: Fix this hack more gracefully\n # add_var_to_sym_table in ast_map needs the id attribute to make the map\n return self.shallow_symbol_handler(ins_node, std_node, \"attr\", check_meta)\n else:\n return self.shallow_match_generic(ins_node, std_node, check_meta)\n\n # noinspection PyPep8Naming\n def shallow_match_Name(self, ins_node, std_node, check_meta=True):\n \"\"\"\n TODO: Make this handle the func field to handle functions\n Matches ins_node to std_node for different cases of encountering a name node in ins_node\n case 1: _var_ matches if std_node is a name node and automatically returns a mapping and symbol table\n case 2: __exp__ matches to any subtree and automatically returns a mapping and symbol table\n case 3: ___ matches to any subtree and automatically returns a mapping\n case 4: matches only if the exact names are the same (falls through to shallow_match_generic)\n Args:\n ins_node:\n std_node:\n check_meta:\n\n Returns:\n list of AstMap: a mapping of ins_node to std_node and possibly a symbol_table, or False if it doesn't match\n \"\"\"\n ins_node.ast_node._id = ins_node.id\n return self.shallow_symbol_handler(ins_node, std_node, \"id\", check_meta)\n\n # noinspection PyPep8Naming,PyMethodMayBeStatic\n def shallow_match_Pass(self, ins_node, std_node, check_meta=True):\n \"\"\"\n An empty body should match to anything\n Args:\n ins_node: Instructor ast to find in the student ast\n std_node: Student AST to search for the instructor ast in\n check_meta: flag to check whether the fields of the instructor node and the student node should match\n\n Returns:\n list of AstMap: a mapping between the isntructor and student asts, or False if such a mapping doesn't exist\n \"\"\"\n # if check_meta and ins_node.field != std_node.field:\n if not self.metas_match(ins_node, std_node, check_meta):\n return []\n mapping = AstMap()\n mapping.add_node_pairing(ins_node, std_node)\n return [mapping]\n\n # noinspection PyPep8Naming,PyMethodMayBeStatic\n def shallow_match_Expr(self, ins_node, std_node, check_meta=True):\n 
\"\"\"\n An Expression node (not to be confused with expressions denoted by the instructor nodes in Name ast nodes)\n should match to anything\n Args:\n ins_node: Instructor ast to find in the student ast\n std_node: Instructor ast to find in the student ast\n check_meta: flag to check whether the fields of the instructor node and the student node should match\n\n Returns:\n a mapping between the instructor and student asts, or False if such a mapping doesn't exist\n \"\"\"\n # if check_meta and ins_node.field != std_node.field:\n if not self.metas_match(ins_node, std_node, check_meta):\n return []\n mapping = AstMap()\n mapping.add_node_pairing(ins_node, std_node)\n return [mapping]\n\n def shallow_match_Call(self, ins_node, std_node, check_meta=True):\n return self.shallow_match_main(ins_node, std_node, check_meta, ignores=None)\n # matches = self.shallow_match_main(ins_node, std_node, check_meta, ignores=[\"func\"])\n # if matches:\n # pass\n # return None\n # TODO: Make this handle Calls more intelligently\n\n # noinspection PyPep8Naming\n def shallow_match_FunctionDef(self, ins_node, std_node, check_meta=True):\n ins = ins_node.astNode\n std = std_node.astNode\n meta_matched = self.metas_match(ins_node, std_node, check_meta)\n is_match = type(ins).__name__ == type(std).__name__ and meta_matched\n mapping = self.shallow_match_main(ins_node, std_node, check_meta, ignores=['name', 'args'])\n matched = False\n if is_match and mapping:\n name = ins.name\n match = _name_regex(name)\n if match[_VAR] and meta_matched: # variable\n ins._id = name\n std._id = std.name\n mapping[0].add_func_to_sym_table(ins_node, std_node) # TODO: Capture result?\n matched = True\n elif match[_WILD] and meta_matched:\n matched = True\n elif name == std.name and meta_matched:\n matched = True\n if matched:\n return mapping\n else:\n return []\n\n # noinspection PyMethodMayBeStatic\n def shallow_match_generic(self, ins_node, std_node, check_meta=True):\n \"\"\"\n Checks that all non astNode attributes are equal between ins_node and std_node\n Args:\n ins_node: Instructor ast root node\n std_node: Student AST root node\n check_meta: flag to check whether the fields of the instructor node and the student node should match\n\n Returns:\n list of AstMap: a mapping between the instructor and student root nodes (potentially empty)\n \"\"\"\n return self.shallow_match_main(ins_node, std_node, check_meta=check_meta)\n\n def shallow_match_main(self, ins_node, std_node, check_meta=True, ignores=None):\n \"\"\"\n Checks that all non astNode attributes are equal between ins_node and std_node\n Args:\n ins_node: Instructor ast root node\n std_node: Student AST root node\n check_meta: flag to check whether the fields of the instructor node and the student node should match\n ignores: a mapping between the instructor and student root nodes, or False if such a mapping doesn't exist\n\n Returns:\n\n \"\"\"\n if ignores is None:\n ignores = []\n ignores.append(\"_id\") # special exception for symbols in lookup tables\n ins = ins_node.astNode\n std = std_node.astNode\n ins_field_list = list(ast.iter_fields(ins))\n std_field_list = list(ast.iter_fields(std))\n meta_matched = self.metas_match(ins_node, std_node, check_meta)\n is_match = len(ins_field_list) == len(std_field_list) and type(ins).__name__ == type(\n std).__name__ and meta_matched\n for insTup, stdTup in zip(ins_field_list, std_field_list):\n if not is_match:\n break\n\n ins_field = insTup[0]\n ins_value = insTup[1]\n std_field = stdTup[0]\n std_value = stdTup[1]\n\n if 
ins_value is None:\n continue\n\n ignore_field = ins_field in ignores\n\n is_match = (ins_field == std_field) or ignore_field\n\n if not isinstance(ins_value, list):\n ins_value = [ins_value]\n\n if not isinstance(std_value, list):\n std_value = [std_value]\n\n # is_match = len(ins_value) == len(std_value)# for stretchy matching this isn't True\n # Reference ast_node_visitor.js for the original behavior and keep note of it for the purposes of handling\n # the children noting the special case when the nodes of the array are actually parameters of the node\n # (e.g. a load function) instead of a child node\n if not ignore_field:\n for inssub_value, stdsub_value in zip(ins_value, std_value):\n if not is_match:\n break\n # TODO: make this a smarter comparison, maybe handle dictionaries, f-strings, tuples, etc.\n if is_primitive(inssub_value):\n is_match = inssub_value == stdsub_value\n if is_match:\n mapping = AstMap() # return MAPPING\n mapping.add_node_pairing(ins_node, std_node)\n return [mapping]\n else:\n return []\n\n # filter function for various types of nodes\n def shallow_match(self, ins_node, std_node, check_meta=True):\n method_name = 'shallow_match_' + type(ins_node.astNode).__name__\n target_func = getattr(self, method_name, self.shallow_match_generic)\n return target_func(ins_node, std_node, check_meta=check_meta)\n\n @staticmethod\n def metas_match(ins_node, std_node, check_meta=True):\n \"\"\"\n Args:\n ins_node:\n std_node:\n check_meta:\n\n Returns:\n\n \"\"\"\n return ((check_meta and ins_node.field == std_node.field) or\n not check_meta\n # or std_node.field == _NONE_FIELD\n or ins_node.field == _NONE_FIELD)\n","src/lib/pedal/cait/__init__.py":"\"\"\"\nA package of tools for capturing student code by matching it against patterns.\n\"\"\"\n\nNAME = 'CAIT'\nSHORT_DESCRIPTION = \"Captures instructor code patterns within student code.\"\nDESCRIPTION = '''\n'''\nREQUIRES = ['Source']\nOPTIONALS = ['TIFA']\n\nfrom pedal.cait.cait_api import (find_match, find_matches,\n parse_program,\n find_submatches, find_expr_sub_matches,\n def_use_error, data_state, data_type,\n expire_cait_cache)\n","src/lib/pedal/mistakes/instructor_append.py":"from pedal.cait.cait_api import find_matches, find_expr_sub_matches, data_state\nfrom pedal.report.imperative import gently_r, explain_r\n\n\ndef append_group_on_change():\n wrong_not_append_to_list()\n\n\ndef append_group():\n missing_append_in_iteration()\n missing_append_list_initialization()\n wrong_append_list_initialization()\n wrong_not_append_to_list()\n append_list_wrong_slot()\n # TODO: add app_assign on next iteration of experiment!\n # app_assign()\n\n\ndef find_append_in(node):\n append_list = []\n calls = node.find_all(\"Call\")\n for node in calls:\n if node.func.attr == \"append\":\n append_list.append(node)\n return append_list\n\n\n\"\"\"\ndef missing_append_in_iteration():\n std_ast = parse_program()\n for_loops = std_ast.find_all(\"For\")\n for loop in for_loops:\n if len(find_append_in(loop)):\n return False\n explain(\"You must construct a list by appending values one at a time to the list.
(app_in_iter)\")\n return True\n\"\"\"\n\n\ndef missing_append_in_iteration():\n message = \"You must construct a list by appending values one at a time to the list.\"\n code = \"app_in_iter\"\n tldr = \"For Loop Append Not Found\"\n matches = find_matches(\"for ___ in ___:\\n\"\n \" __expr__\")\n if matches:\n for match in matches:\n __expr__ = match[\"__expr__\"]\n submatch = __expr__.find_matches(\"___.append(___)\")\n if submatch:\n return False\n return explain_r(message, code, label=tldr)\n return False\n\n\ndef wrong_not_append_to_list():\n message = (\"Values can only be appended to a list. The variable {0!s}
is either not initialized, \"\n \"not initialized correctly, or is confused with another variable.\")\n code = \"app_not_list\"\n tldr = \"Not Appending to List\"\n matches = find_matches(\"for ___ in ___:\\n\"\n \" __expr__\")\n for match in matches:\n __expr__ = match[\"__expr__\"]\n submatches = __expr__.find_matches(\"_target_.append(___)\")\n for submatch in submatches:\n _target_ = submatch[\"_target_\"]\n if not data_state(_target_).was_type('list'):\n return explain_r(message.format(_target_), code, label=tldr)\n return False\n\n\ndef missing_append_list_initialization():\n message = \"The list variable {0!s}
must be initialized.\"\n code = \"no_app_list_init\"\n tldr = \"List Not Initialized\"\n matches = find_matches(\"for ___ in ___:\\n\"\n \" __expr__\")\n for match in matches:\n __expr__ = match[\"__expr__\"]\n submatches = __expr__.find_matches(\"_new_list_.append(___)\", )\n for submatch in submatches:\n _new_list_ = submatch[\"_new_list_\"].astNode\n # TODO: In theory revisit this by merging matches\n matches02 = find_matches(\"{} = []\\n\"\n \"for ___ in ___:\\n\"\n \" __expr__\".format(_new_list_.id))\n if not matches02:\n return explain_r(message.format(_new_list_.id), code, label=tldr)\n return False\n\n\ndef wrong_append_list_initialization():\n message = (\"The list variable {0!s}
is either not initialized \"\n \"correctly or mistaken for another variable. \"\n \"The list you append to should be initialized to an empty list.\")\n code = \"app_list_init\"\n tldr = \"Incorrect Initialization or Usage of Empty List\"\n matches = find_matches(\"_list_ = __expr1__\\n\"\n \"for ___ in ___:\\n\"\n \" __expr2__\")\n for match in matches:\n _list_ = match[\"_list_\"].astNode\n __expr1__ = match[\"__expr1__\"]\n __expr2__ = match[\"__expr2__\"]\n submatch = __expr2__.find_matches(\"_list_.append(___)\")\n if submatch and (__expr1__.ast_name == \"List\" and\n len(__expr1__.elts) != 0 or\n __expr1__.ast_name != \"List\"):\n return explain_r(message.format(_list_.id), code, label=tldr)\n return False\n\n\ndef append_list_wrong_slot():\n message = \"You should not append a list ({0!s}
) to {1!s}
.\"\n code = \"app_list_slot\"\n tldr = \"Appending List Error\"\n matches = find_matches(\"_target_.append(_item_)\")\n if matches:\n for match in matches:\n _item_ = match[\"_item_\"].astNode\n _target_ = match[\"_target_\"].astNode\n if data_state(_item_).was_type('list'):\n return explain_r(message.format(_item_.id, _target_.id), code, label=tldr)\n return False\n\n\ndef app_assign():\n message = (\"Appending modifies the list, so unlike addition,\"\n \" an assignment statement is not needed when using append.\")\n code = \"app_asgn\"\n\n matches = find_matches(\"_sum_ = _sum_.append(__exp__)\")\n if matches:\n return explain_r(message, code)\n return False\n","src/lib/pedal/mistakes/instructor_filter.py":"from pedal.cait.cait_api import find_match, find_matches\nfrom pedal.report.imperative import gently_r, explain_r\n\n\ndef filter_group():\n missing_if_in_for()\n append_not_in_if()\n\n\ndef missing_if_in_for():\n \"\"\"\n Name: missing_if_in_for\n Pattern:\n missing\n for - in ___ :\n if ...
- ... :\n\n Feedback: The arrangement of decision and iteration is not correct for the filter pattern.\n Returns:\n\n \"\"\"\n message = \"The arrangement of decision and iteration is not correct for the filter pattern.\"\n code = \"missing_if_in_for\"\n tldr = \"Missing if In For\"\n matches = find_matches(\"for _item_ in ___:\\n\"\n \" if __expr__:\\n\"\n \" pass\")\n if not matches:\n return explain_r(message, code, label=tldr)\n return False\n\n\ndef append_not_in_if():\n \"\"\"\n Name: append_not_in_if\n Pattern:\n missing\n if ... :\n ___.append(___)\n\n Feedback: Only items satisfying some condition should be appended to the list.\n\n Returns:\n \"\"\"\n message = \"Only items satisfying some condition should be appended to the list.\"\n code = \"app_not_in_if\"\n tldr = \"Append not in if\"\n match = find_match(\"if ___:\\n\"\n \" ___.append(___)\")\n if not match:\n return explain_r(message, code, label=tldr)\n return False\n","src/lib/pedal/mistakes/instructor_histogram.py":"from pedal.cait.cait_api import find_match, find_matches, data_state\nfrom pedal.report.imperative import gently_r, explain_r\n\n\ndef histogram_group():\n histogram_argument_not_list()\n histogram_wrong_list()\n histogram_missing()\n plot_show_missing()\n\n\ndef histogram_missing():\n \"\"\"\n Name: histogram_missing\n Pattern:\n\n Missing\n plt.hist(___)\n\n Feedback: The program should display a histogram.\n\n Returns:\n \"\"\"\n message = \"The program should display a histogram.\"\n code = \"histo_missing\"\n tldr = \"Missing Histogram\"\n match = find_match(\"plt.hist(___)\")\n if not match:\n return explain_r(message, code, label=tldr)\n return False\n\n\ndef plot_show_missing():\n \"\"\"\n Name: plot_show_missing\n Pattern:\n Missing\n plt.show()\n\n Feedback: The plot must be explicitly shown to appear in the Printer area.\n\n Returns:\n \"\"\"\n message = \"The plot must be explicitly shown to appear in the Printer area.\"\n code = \"plot_show_missing\"\n tldr = \"No Plot Shown\"\n match = find_match(\"plt.show()\")\n if not match:\n return explain_r(message, code, label=tldr)\n return False\n\n\ndef histogram_argument_not_list():\n \"\"\"\n\n Name: histogram_argument_not_list\n Pattern:\n plt.hist()\n Where type() is not \"list\"\n\n Feedback: Making a histogram requires a list; is not a list.\n\n\n Returns:\n \"\"\"\n message = \"Making a histogram requires a list;
{0!s}
is not a list.\"\n code = \"hist_arg_not_list\"\n tldr = \"Making Histogram from Non-list\"\n matches = find_matches(\"plt.hist(_argument_)\")\n if matches:\n for match in matches:\n _argument_ = match[\"_argument_\"].astNode\n if not _argument_.get_data_state() or not _argument_.get_data_state().was_type('list'):\n return explain_r(message.format(_argument_.id), code, label=tldr)\n return False\n\n\ndef histogram_wrong_list():\n \"\"\"\n\n Name: histogram_wrong_list\n Pattern:\n\n for ___ in ___:\n .append(___)\n plt.hist()\n\n where name() != name()\n\n Feedback: The list created in the iteration is not the list being used to create the histogram.\n\n Returns:\n \"\"\"\n message = \"The list created in the iteration is not the list being used to create the histogram.\"\n code = \"histo_wrong_list\"\n tldr = \"Plotting Wrong List\"\n matches = find_matches(\"for ___ in ___:\\n\"\n \" __expr__\\n\"\n \"plt.hist(_list_)\")\n if matches:\n for match in matches:\n _list_ = match[\"_list_\"].astNode\n __expr__ = match[\"__expr__\"]\n submatches = __expr__.find_matches(\"_list_.append(___)\")\n if submatches:\n return False\n return explain_r(message, code, label=tldr)\n return False\n\n\ndef histogram_wrong_placement():\n message = \"The histogram should be plotted only once, after the new list has been created\"\n code = \"histo_wrong_place\"\n tldr = \"Histogram Plot Placed Incorrectly\"\n matches = find_matches(\"for ___ in ___:\\n\"\n \" pass\\n\")\n if matches:\n matches02 = find_matches(\"plt.hist(___)\")\n for match in matches:\n if matches02:\n for match02 in matches02:\n if match02.match_lineno > match.match_lineno:\n return False\n return explain_r(message, code, label=tldr)\n","src/lib/pedal/mistakes/instructor_iteration.py":"from pedal.cait.cait_api import (parse_program, find_match, find_matches,\n find_expr_sub_matches, data_state,\n def_use_error)\nfrom pedal.report.imperative import gently_r, explain_r\n\n\ndef iteration_group():\n list_initialization_misplaced()\n wrong_target_is_list()\n wrong_list_repeated_in_for()\n missing_iterator_initialization()\n list_not_initialized_on_run()\n wrong_iterator_not_list()\n missing_target_slot_empty()\n missing_for_slot_empty()\n wrong_target_reassigned()\n\n\ndef iteration_group_on_change():\n wrong_target_is_list()\n wrong_list_repeated_in_for()\n wrong_iterator_not_list()\n\n\ndef all_for_loops():\n std_ast = parse_program()\n return std_ast.find_all(\"For\")\n\n\n# this conflics with list_repeated_in_for\ndef wrong_target_is_list():\n message = ('The variable {0!s}
is a list and '\n 'should not be placed in the iteration variable slot of the \"for\" block')\n code = \"target_is_list\"\n tldr = \"Iteration Variable Overwriting List\"\n match = find_match(\"for _item_ in ___:\\n pass\")\n if match:\n _item_ = match[\"_item_\"].astNode\n if data_state(_item_).was_type('list'):\n return explain_r(message.format(_item_.id), code, label=tldr)\n return False\n\n\n# this conflicts with list_in_wrong_slot_in_for\ndef wrong_list_repeated_in_for():\n message = 'The {0!s}
variable can only appear once in the \"for\" block.'\n code = \"list_repeat\"\n tldr = \"Duplicate Iteration Variable\"\n match = find_match(\"for _item_ in _item_:\\n pass\")\n if match:\n _item_ = match[\"_item_\"].astNode\n if data_state(_item_).was_type('list'):\n return explain_r(message.format(_item_.id), code, label=tldr)\n return False\n\n\n# this isn't consistent with the pattern you wrote TODO: Fix this\ndef missing_iterator_initialization():\n message1 = \"The slot to hold a list in the iteration is empty.\"\n code1 = \"no_iter_init-blank\"\n tldr1 = \"Iteration Variable is Blank\"\n\n message2 = \"The variable {0!s}
is in the list slot of the iteration but is not a list.\"\n code2 = \"no_iter_init\"\n tldr2 = \"Iteration Variable is Not a List\"\n\n match = find_match(\"for ___ in _list_:\\n pass\")\n if match:\n _list_ = match[\"_list_\"].astNode\n if _list_.id == \"___\":\n return explain_r(message1, code1, label=tldr1)\n elif not data_state(_list_).was_type('list'):\n return explain_r(message2.format(_list_.id), code2, label=tldr2)\n return False\n\n\n# TODO: We need to cover the different cases for these\ndef wrong_iterator_not_list():\n message = (\"The variable {0!s}
has been set to something that is not a list but is placed \"\n \"in the iteration block that must be a list.\")\n code = \"iter_not_list\"\n tldr = \"Iteration List is not list\"\n\n match = find_match(\"for ___ in _item_:\\n pass\")\n if match:\n _item_ = match[\"_item_\"].astNode\n if not data_state(_item_).was_type('list'):\n return explain_r(message.format(_item_.id), code, label=tldr)\n return False\n\n\ndef missing_target_slot_empty():\n message = \"You must fill in the empty slot in the iteration.\"\n code = \"target_empty\"\n tldr = \"Missing Iteration Variable\"\n match = find_match(\"for _item_ in ___:\\n pass\")\n if match:\n _item_ = match[\"_item_\"].astNode\n if _item_.id == \"___\":\n return explain_r(message, code, label=tldr)\n return False\n\n\ndef list_not_initialized_on_run():\n message = \"The list in your for loop has not been initialized.\"\n code = \"no_list_init\"\n tldr = \"List Variable Uninitialized\"\n match = find_match(\"for ___ in _item_:\\n pass\")\n if match:\n _item_ = match[\"_item_\"][0].astNode\n if def_use_error(_item_):\n return explain_r(message, code, label=tldr)\n return False\n\n\ndef list_initialization_misplaced():\n message = \"Initialization of {0!s}
is a list but either in the wrong place or redefined\"\n code = \"list_init_misplaced\"\n tldr = \"Iterating over Non-list\"\n match = find_match(\"for ___ in _item_:\\n pass\")\n if match:\n _item_ = match[\"_item_\"][0].astNode\n if data_state(_item_).was_type('list') and def_use_error(_item_):\n return explain_r(message.format(_item_.id), code, label=tldr)\n return False\n\n\ndef missing_for_slot_empty():\n message = \"You must fill in the empty slot in the iteration.\"\n code = \"for_incomplete\"\n tldr = \"Iteration Incomplete\"\n match = find_match(\"for _item_ in _list_:\\n pass\")\n if match:\n _item_ = match[\"_item_\"][0].astNode\n _list_ = match[\"_list_\"][0].astNode\n if _item_.id == \"___\" or _list_.id == \"___\":\n return explain_r(message, code, label=tldr)\n return False\n\n\ndef wrong_target_reassigned():\n message = \"The variable {0!s}
has been reassigned. The iteration variable shouldn't be reassigned\"\n code = \"target_reassign\"\n tldr = \"Iteration Variable has been Reassigned\"\n matches = find_matches(\"for _item_ in ___:\\n\"\n \" __expr__\")\n for match in matches:\n __expr__ = match[\"__expr__\"]\n _item_ = match[\"_item_\"][0]\n submatches = __expr__.find_matches(\"_item_ = ___\")\n if submatches:\n return explain_r(message.format(_item_), code, label=tldr)\n return False\n","src/lib/pedal/mistakes/iteration_context.py":"from pedal.cait.cait_api import (parse_program,\n find_matches, find_match,\n find_expr_sub_matches)\nfrom pedal.report.imperative import explain, gently\nimport pedal.mistakes.instructor_append as append_api\nfrom pedal.toolkit.utilities import *\nfrom pedal.sandbox.compatibility import get_output\nfrom pedal.report.imperative import gently_r, explain_r\n\n\n# ################8.2 Start#######################\ndef wrong_list_length_8_2():\n message = \"You must have at least three pieces\"\n code = \"list length_8.2\"\n tldr = \"List too short\"\n matches = find_matches(\"_list_ = __expr__\")\n if matches:\n for match in matches:\n __expr__ = match[\"__expr__\"]\n if __expr__.ast_name == \"List\" and len(__expr__.elts) < 3:\n return explain_r(message, code, label=tldr)\n return False\n\n\ndef missing_list_initialization_8_2():\n message = ('You must set the variable shopping_cart
'\n 'to a list containing the prices of items in the shopping cart.')\n code = \"missing_list_init_8.2\"\n tldr = \"Missing list initialization\"\n matches = find_matches(\"shopping_cart = __expr__\")\n for match in matches:\n __expr__ = match[\"__expr__\"]\n if __expr__.ast_name == \"List\":\n return False\n return explain_r(message, code, label=tldr)\n\n\ndef wrong_list_is_constant_8_2():\n message = 'You must set shoppping_cart
to a list of values not to a single number.'\n code = \"list_is_const_8.2\"\n tldr = \"Shopping Cart not set to list\"\n matches = find_matches(\"shopping_cart = __expr__\")\n for match in matches:\n __expr__ = match[\"__expr__\"]\n if __expr__.ast_name == \"Num\":\n return explain_r(message, code, label=tldr)\n return False\n\n\ndef list_all_zeros_8_2():\n message = 'Try seeing what happens when you change the numbers in the list.'\n code = 'default_list_8.2'\n tldr = 'Use different numbers'\n std_ast = parse_program()\n lists = std_ast.find_all('List')\n is_all_zero = True\n for init_list in lists:\n for node in init_list.elts:\n if node.ast_name == 'Num' and node.n != 0:\n is_all_zero = False\n break\n if is_all_zero:\n break\n if is_all_zero:\n return explain_r(message, code, label=tldr)\n return False\n\n\n# ################8.2 End#######################\n\n\n# ################8.3 Start#######################\ndef wrong_list_initialization_placement_8_3():\n message = ('The list of episode lengths (episode_length_list
)'\n ' must be initialized before the iteration which uses this list.')\n code = \"init_place_8.3\"\n tldr = \"Wrong Initialization Placement\"\n for_matches = find_matches(\"for ___ in ___:\\n\"\n \" pass\")\n init_matches = find_matches(\"episode_length_list = ___\")\n if init_matches and for_matches:\n for for_match in for_matches:\n for_lineno = for_match.match_lineno\n for init_match in init_matches:\n if init_match.match_lineno > for_lineno:\n return explain_r(message, code, label=tldr)\n return False\n\n\ndef wrong_accumulator_initialization_placement_8_3():\n message = ('The variable to hold the sum of the episode lengths (sum_length
) '\n 'must be initialized before the iteration which uses this variable.')\n code = \"accu_init_place_8.3\"\n tldr = \"Accumulator initialization misplaced\"\n for_matches = find_matches(\"for ___ in ___:\"\n \" pass\")\n init_matches = find_matches(\"sum_length = 0\")\n if init_matches and for_matches:\n for for_match in for_matches:\n for_lineno = for_match.match_lineno\n for init_match in init_matches:\n if init_match.match_lineno > for_lineno:\n return explain_r(message, code, label=tldr)\n return False\n\n\ndef wrong_iteration_body_8_3():\n message = \"The addition of each episode length to the total length is not in the correct place.\"\n code = \"iter_body_8.3\"\n tldr = \"Accumulation Misplaced\"\n match = find_match(\"for _item_ in _list_:\\n\"\n \" sum_length = ___ + ___\\n\")\n if not match:\n return explain_r(message, code, label=tldr)\n return False\n\n\ndef wrong_print_8_3():\n message = ('The output of the total length of time is not in the correct place. The total length of time should be'\n ' output only once after the total length of time has been computed.')\n code = \"print_8.3\"\n tldr = \"Print statement misplaced\"\n match = find_match(\"for _item_ in _list_:\\n\"\n \" pass\\n\"\n \"print(_total_)\")\n if not match:\n return explain_r(message, code, label=tldr)\n return False\n\n\n# ################8.3 End#######################\n\n\n# ################8.4 Start#######################\ndef missing_target_slot_empty_8_4():\n message = 'You must fill in the empty slot in the iteration.'\n code = 'target_empty_8.4'\n tldr = \"Iteration Variable Empty\"\n matches = find_matches(\"for _item_ in pages_count_list:\\n\"\n \" pass\")\n if matches:\n for match in matches:\n _item_ = match[\"_item_\"][0]\n if _item_.id == \"___\":\n return explain_r(message, code, tldr)\n return False\n\n\ndef missing_addition_slot_empty_8_4():\n message = \"You must fill in the empty slot in the addition.\"\n code = \"add_empty_8.4\"\n tldr = \"Addition Blank\"\n matches = find_matches(\"sum_pages + _item_\")\n if matches:\n for match in matches:\n _item_ = match[\"_item_\"][0]\n if _item_.id == \"___\":\n return explain_r(message, code, label=tldr)\n return False\n\n\ndef wrong_names_not_agree_8_4():\n message = \"Each value of {0!s}
must be added to {1!s}
.\"\n code = \"name_agree_8.4\"\n tldr = \"Iteration Variable and Accumulation Mismatch\"\n matches = find_matches(\"for _item1_ in pages_count_list:\\n\"\n \" sum_pages = sum_pages + _item2_\")\n if matches:\n for match in matches:\n # in theory, these will always be different? should test in test_cait\n _item1_ = match[\"_item1_\"][0]\n _item2_ = match[\"_item2_\"][0]\n if _item1_.id != _item2_.id:\n return explain_r(message.format(_item1_.id, _item2_.id), code, label=tldr)\n return False\n\n\n# ################8.4 End#######################\ndef wrong_modifying_list_8_5():\n \"\"\"\n\n # old code for record keeping because significantly different semantics\n std_ast = parse_program()\n list_init = std_ast.find_all('List')\n true_sum = 0\n if len(list_init) != 0:\n for value in list_init[0].elts:\n true_sum = value.n + true_sum\n if true_sum != sum([20473, 27630, 17849, 19032, 16378]) or len(list_init) == 0:\n explain('Don\\'t modify the list
(mod_list_8.5)')\n return True\n return False\n\n Returns:\n \"\"\"\n message = \"Don't modify the list\"\n code = \"mod_list_8.5\"\n match = find_match(\"[20473, 27630, 17849, 19032, 16378]\")\n if not match:\n return explain_r(message, code)\n return False\n\n\ndef wrong_modifying_list_8_6():\n \"\"\"\n std_ast = parse_program()\n list_init = std_ast.find_all('List')\n true_sum = 0\n for value in list_init[0].elts:\n true_sum = value.n + true_sum\n if true_sum != sum([2.9, 1.5, 2.3, 6.1]):\n explain('Don\\'t modify the list
(mod_list_8.6)')\n Returns:\n \"\"\"\n message = \"Don't modify the list\"\n code = \"mod_list_8.6\"\n match = find_match(\"_list_ = [2.9, 1.5, 2.3, 6.1]\")\n if not match:\n return explain_r(message, code)\n return False\n\n\ndef wrong_should_be_counting():\n \"\"\"\n std_ast = parse_program()\n for_loops = std_ast.find_all('For')\n for loop in for_loops:\n iter_prop = loop.target\n assignments = loop.find_all('Assign')\n for assignment in assignments:\n binops = assignment.find_all('BinOp')\n for binop in binops:\n if binop.has(iter_prop) and binop.op == 'Add':\n explain('This problem asks for the number of items in the list not the total of all the values in '\n 'the list.
(not_count)')\n Returns:\n \"\"\"\n message = \"This problem asks for the number of items in the list not the total of all the values in the list.\"\n code = \"not_count\"\n tldr = \"Summing instead of counting\"\n matches = find_matches(\"for _item_ in ___:\\n\"\n \" __expr__\")\n if matches:\n for match in matches:\n _item_ = match[\"_item_\"][0]\n __expr__ = match[\"__expr__\"]\n submatches = __expr__.find_matches(\"___ = ___ + _item_\")\n if submatches:\n return explain_r(message, code, label=tldr)\n return False\n\n\ndef wrong_should_be_summing():\n \"\"\"\n std_ast = parse_program()\n for_loops = std_ast.find_all('For')\n for loop in for_loops:\n assignments = loop.find_all('Assign')\n for assignment in assignments:\n binops = assignment.find_all('BinOp')\n for binop in binops:\n if binop.has(1) and binop.op == 'Add':\n explain('This problem asks for the total of all the values in the list not the number of items in '\n 'the list.
(not_sum)')\n \"\"\"\n message = \"This problem asks for the total of all the values in the list not the number of items in the list.\"\n code = \"not_sum\"\n tldr = \"Counting instead of summing\"\n matches = find_matches(\"for _item_ in ___:\\n\"\n \" __expr__\")\n if matches:\n for match in matches:\n __expr__ = match[\"__expr__\"]\n submatches = __expr__.find_matches(\"___ = 1 + ___\", )\n if submatches:\n return explain_r(message, code, label=tldr)\n return False\n\n\ndef missing_addition_slot_empty():\n \"\"\"\n std_ast = parse_program()\n assignments = std_ast.find_all('Assign')\n for assignment in assignments:\n # left = assignment.target\n right = assignment.value\n binOp = right.find_all('BinOp')\n if len(binOp) == 1:\n binOp = binOp[0]\n if binOp.op == 'Add':\n if binOp.left.ast_name == 'Name' and binOp.right.ast_name == 'Name':\n if binOp.left.id == '___' or binOp.right.id == '___':\n explain('You must fill in the empty slot in the addition.
(add_empty)')\n return True\n return False\n Returns:\n \"\"\"\n message = \"You must fill in the empty slot in the addition.\"\n code = \"add_empty\"\n tldr = \"Addition Blank\"\n matches = find_matches(\"___ + _item_\")\n if matches:\n for match in matches:\n _item_ = match[\"_item_\"][0]\n if _item_.id == \"___\":\n return explain_r(message, code, tldr)\n return False\n\n\ndef wrong_cannot_sum_list():\n \"\"\"\n\n std_ast = parse_program()\n for_loops = std_ast.find_all('For')\n for loop in for_loops:\n list_prop = loop.iter\n assignments = loop.find_all('Assign')\n for assignment in assignments:\n binops = assignment.find_all('BinOp')\n for binop in binops:\n if binop.has(list_prop) and binop.op == 'Add':\n explain('Addition can only be done with a single value at a time, not with an entire list at one'\n ' time.
(sum_list)')\n Returns:\n \"\"\"\n message = 'Addition can only be done with a single value at a time, not with an entire list at one'\n code = \"sum_list\"\n tldr = \"Cannot Sum a List\"\n matches = find_matches(\"for ___ in _list_ :\\n\"\n \" __expr__\")\n if matches:\n for match in matches:\n _list_ = match[\"_list_\"][0]\n __expr__ = match[\"__expr__\"]\n # submatches = __expr__.find_matches(\"___ = ___ + {}\".format(_list_.id), )\n submatches = __expr__.find_matches(\"___ = ___ + _list_\")\n if submatches:\n return explain_r(message, code, label=tldr)\n return False\n\n\ndef missing_no_print():\n message = \"Program does not output anything.\"\n code = \"no_print\"\n tldr = \"Missing Output\"\n prints = find_match('print(___)', cut=True)\n if not prints:\n return explain_r(message, code, label=tldr)\n return False\n\n\ndef missing_counting_list():\n \"\"\"\n std_ast = parse_program()\n has_count = False\n for_loops = std_ast.find_all('For')\n if len(for_loops) > 0:\n for loop in for_loops:\n assignments = loop.find_all('Assign')\n if len(assignments) < 1:\n continue\n for assignment in assignments:\n binops = assignment.find_all('BinOp')\n if len(binops) < 1:\n continue\n lhs = assignment.target\n for binop in binops:\n if binop.has(lhs) and binop.has(1) and binop.op == 'Add':\n has_count = True\n if not has_count:\n explain('Count the total number of items in the list using iteration.
(miss_count_list)')\n Returns:\n \"\"\"\n message = 'Count the total number of items in the list using iteration.'\n code = \"miss_count_list\"\n tldr = \"Missing Count in Iteration\"\n matches = find_matches(\"for _item_ in ___:\\n\"\n \" __expr__\")\n if matches:\n for match in matches:\n __expr__ = match[\"__expr__\"]\n submatches = __expr__.find_matches(\"_sum_ = _sum_ + 1\", )\n if submatches:\n return False\n return explain_r(message, code, label=tldr)\n\n\ndef missing_summing_list():\n \"\"\"\n std_ast = parse_program()\n has_total = False\n for_loops = std_ast.find_all('For')\n if len(for_loops) > 0:\n for loop in for_loops:\n assignments = loop.find_all('Assign')\n if len(assignments) < 1:\n continue\n iter_prop = loop.target\n for assignment in assignments:\n binops = assignment.find_all('BinOp')\n if len(binops) < 1:\n continue\n lhs = assignment.target\n for binop in binops:\n if binop.has(lhs) and binop.has(iter_prop) and binop.op == 'Add':\n has_total = True\n if not has_total:\n explain('Sum the total of all list elements using iteration.
(miss_sum_list)')\n Returns:\n \"\"\"\n message = 'Sum the total of all list elements using iteration.'\n code = \"miss_sum_list\"\n tldr = \"Missing Sum in Iteration\"\n matches = find_matches(\"for _item_ in ___:\\n\"\n \" __expr__\")\n if matches:\n for match in matches:\n _item_ = match[\"_item_\"][0]\n __expr__ = match[\"__expr__\"]\n submatches = __expr__.find_matches(\"_sum_ = _sum_ + _item_\")\n if submatches:\n return False\n return explain_r(message, code, label=tldr)\n\n\ndef missing_zero_initialization():\n \"\"\"\n\n std_ast = parse_program()\n for_loops = std_ast.find_all('For')\n accumulator = None\n loop_acu = None\n for loop in for_loops:\n assignments = loop.find_all('Assign')\n for assignment in assignments:\n binops = assignment.find_all('BinOp')\n if len(binops) > 0:\n lhs = assignment.target\n for binop in binops:\n if binop.has(lhs) and binop.op == 'Add':\n accumulator = lhs\n loop_acu = loop\n accu_init = False\n if accumulator is not None:\n assignments = std_ast.find_all('Assign')\n for assignment in assignments:\n if loop_acu.lineno > assignment.lineno:\n lhs = assignment.target\n if lhs.id == accumulator.id and assignment.has(0):\n accu_init = True\n break\n if not accu_init and accumulator is not None:\n explain('The addition on the first iteration step is not correct because either the variable '\n '{0!s}
has not been initialized to an appropriate initial value or it has not been placed'\n ' in an appropriate location
(miss_zero_init)'.format(accumulator.id))\n return False\n return True\n Returns:\n \"\"\"\n\n message = ('The addition on the first iteration step is not correct because either the variable {0!s}
'\n 'has not been initialized to an appropriate initial value '\n 'or it has not been placed in an appropriate location')\n code = \"miss_zero_init\"\n tldr = \"Missing Initialization for Accumulator\"\n matches01 = find_matches(\"for ___ in ___:\\n\"\n \" __expr__\")\n if matches01:\n for match01 in matches01:\n __expr__ = match01[\"__expr__\"]\n submatches01 = __expr__.find_matches(\"_sum_ = _sum_ + ___\", )\n if submatches01:\n for submatch01 in submatches01:\n _sum_ = submatch01[\"_sum_\"][0]\n matches02 = find_matches((\"{} = 0\\n\"\n \"for ___ in ___:\\n\"\n \" __expr__\").format(_sum_.id))\n if not matches02:\n return explain_r(message.format(_sum_.id), code, label=tldr)\n return False\n\n\ndef wrong_printing_list():\n message = 'You should be printing a single value.'\n code = \"list_print\"\n tldr = \"Printing in Iteration\"\n matches = find_matches(\"for ___ in ___:\\n\"\n \" __expr__\")\n if matches:\n for match in matches:\n __expr__ = match[\"__expr__\"]\n if __expr__.find_matches(\"print(___)\", ):\n return explain_r(message, code, label=tldr)\n return False\n\n\n# TODO: This might be reason to rethink letting instructor symbols map to multiple items\ndef missing_average():\n message = \"An average value is not computed.<\"\n code = \"no_avg\"\n tldr = \"Missing Computation\"\n matches_missing = find_matches(\"for ___ in ___:\\n\"\n \" pass\\n\"\n \"__expr__\")\n matches = []\n if matches_missing:\n for match in matches_missing:\n __expr__ = match[\"__expr__\"]\n sub_matches = __expr__.find_matches(\"_total_/_count_\", )\n if sub_matches:\n for sub_match in sub_matches:\n _total_ = sub_match[\"_total_\"][0]\n _count_ = sub_match[\"_count_\"][0]\n if _total_.id != _count_.id:\n matches.append(match)\n if not len(matches) > 0:\n return explain_r(message, code, label=tldr)\n return False\n\n\ndef warning_average_in_iteration():\n message = ('An average value is best computed after the properties name {0!s}
(total) and '\n '{1!s}
are completely known rather than recomputing the average on each iteration.')\n code = \"avg_in_iter\"\n tldr = \"Redundant Average Calculation\"\n matches = find_matches(\"for ___ in ___:\\n\"\n \" __expr__\\n\")\n if matches:\n for match in matches:\n __expr__ = match[\"__expr__\"]\n submatches = __expr__.find_matches(\"_average_ = _total_/_count_\", )\n if submatches:\n for submatch in submatches:\n _total_ = submatch[\"_total_\"][0]\n _count_ = submatch[\"_count_\"][0]\n _average_ = submatch[\"_average_\"][0]\n if _total_.id != _count_.id != _average_.id and _total_.id != _average_.id:\n return explain_r(message.format(_total_.id, _count_.id), code, label=tldr)\n\n return False\n\n\ndef wrong_average_denominator():\n message = \"The average is not calculated correctly.\"\n code = \"avg_denom\"\n tldr = \"Incorrect Average Calculation\"\n matches = find_matches(\"for ___ in ___:\\n\"\n \" __expr__\\n\" # where expr contains _count_ = _count_ + 1\n \"__expr2__\") # where expr2 contains ___/_value_\n # where _value_.id != _count_.id\n if matches:\n for match in matches:\n __expr__ = match[\"__expr__\"]\n __expr2__ = match[\"__expr2__\"]\n # _value_ = match[\"_value_\"][0]\n submatches = __expr__.find_matches(\"_count_ = _count_ + 1\", )\n submatches02 = find_expr_sub_matches(\"___/_value_\", __expr2__)\n if submatches and submatches02:\n for submatch in submatches:\n for submatch02 in submatches02:\n _count_ = submatch[\"_count_\"][0]\n _value_ = submatch02[\"_value_\"][0]\n if _count_.id != _value_.id:\n return explain_r(message, code, label=tldr)\n return False\n\n\ndef wrong_average_numerator():\n message = \"The average is not calculated correctly.\"\n code = \"avg_numer\"\n tldr = \"Incorrect Average Calculation\"\n matches = find_matches(\"for _item_ in ___:\\n\"\n \" __expr__\\n\" # where expr contains _total_ = _total_ + 1\n \"__expr2__\") # where expr2 contains _value_/___\n if matches:\n for match in matches:\n __expr__ = match[\"__expr__\"]\n __expr2__ = match[\"__expr2__\"]\n _item_ = match[\"_item_\"][0]\n # TODO: In theory, we could merge these matches to match variables...\n submatches = __expr__.find_matches(\"_total_ = _total_ + _item_\")\n # submatches02 = find_expr_sub_matches(\"_value_/___\", __expr2__)\n submatches02 = __expr2__.find_matches(\"_value_/___\")\n if submatches and submatches02:\n for submatch in submatches:\n for submatch02 in submatches02:\n _value_ = submatch02[\"_value_\"][0]\n _total_ = submatch[\"_total_\"][0]\n if _total_.id != _value_.id:\n return explain_r(message, code, label=tldr)\n return False\n\n\n# #######################AVERAGE END###########################\ndef wrong_compare_list():\n message = \"Each item in the list {0!s}
must be compared one item at a time.\"\n code = \"comp_list\"\n tldr = \"Not Comparing Each Item\"\n matches = find_matches(\"for ___ in _list_:\\n\"\n \" if __expr__:\\n\"\n \" pass\")\n if matches:\n for match in matches:\n _list_ = match[\"_list_\"][0]\n __expr__ = match[\"__expr__\"]\n if __expr__.has(_list_.astNode):\n return explain_r(message.format(_list_.id), code, label=tldr)\n return False\n\n\ndef wrong_for_inside_if():\n message = \"The iteration should not be inside the decision block.\"\n code = \"for_in_if\"\n tldr = \"For inside if\"\n match = find_match(\"if ___:\\n\"\n \" for ___ in ___:\\n\"\n \" pass\")\n if match:\n return explain_r(message, code, label=tldr)\n return False\n\n\ndef iterator_is_function():\n message = \"You should make a variable for the list instead of using a function call for the list\"\n code = \"iter_is_func\"\n tldr = \"Using Function Call instead of List\"\n std_ast = parse_program()\n for_loops = std_ast.find_all('For')\n # noinspection PyBroadException\n try:\n for loop in for_loops:\n list_prop = loop.iter\n if list_prop.ast_name == 'Call':\n return explain_r(message, code, label=tldr)\n except Exception:\n return False\n return False\n\n\n# ##########################9.1 START############################\ndef wrong_list_initialization_9_1():\n message = \"The list of rainfall amounts (rainfall_list
) is not initialized properly.\"\n code = \"list_init_9.1\"\n tldr = \"Incorrect List Initialization\"\n match = find_match('rainfall_list = weather.get(\"Precipitation\",\"Location\",\"Blacksburg, VA\")')\n if not match:\n return explain_r(message, code, label=tldr)\n return False\n\n\ndef wrong_accumulator_initialization_9_1():\n message = (\"The variable to hold the total value of the rainfall amounts (rainfall_sum) \"\n \"is not initialized properly.\")\n code = \"accu_init_9.1\"\n tldr = \"Incorrect Accumulation Variable initialization\"\n match = find_match(\"rainfall_sum = 0\")\n if not match:\n return explain_r(message, code, label=tldr)\n return False\n\n\ndef wrong_accumulation_9_1():\n message = \"The addition of each rainfall amount to rainfall_sum is not correct.\"\n code = \"accu_9.1\"\n tldr = \"Incorrect Accumulation Statement\"\n matches = find_matches(\"rainfall_sum = _item_ + rainfall\")\n if matches:\n for match in matches:\n _item_ = match[\"_item_\"][0]\n if _item_.id != \"rainfall_sum\":\n return explain_r(message, code, label=tldr)\n return False\n\n\ndef wrong_list_initialization_placement_9_1():\n message = (\"The list of rainfall amount (rainfall_list) \"\n \"must be initialized before the iteration that uses this list.\")\n code = \"list_init_place_9.1\"\n tldr = \"List initialization Misplaced or Missing\"\n match = find_match(\"rainfall_list = ___\\n\"\n \"for _item_ in _list_:\\n\"\n \" pass\")\n if not match:\n return explain_r(message, code, label=tldr)\n return False\n\n\ndef wrong_accumulator_initialization_placement_9_1():\n message = (\"The variable for the sum of all the rainfall amounts (rainfall_sum) \"\n \"must be initialized before the iteration which uses this variable.\")\n code = \"accu_init_place_9.1\"\n tldr = \"Accumulator Initialization Misplaced or missing\"\n matches = find_matches(\"rainfall_sum = ___\\n\"\n \"for _item_ in _list_:\\n\"\n \" pass\")\n if not matches:\n return explain_r(message, code, label=tldr)\n return False\n\n\ndef wrong_iteration_body_9_1():\n message = \"The addition of each rainfall amount to the total rainfall is not in the correct place.\"\n code = \"iter_body_9.1\"\n tldr = \"Accumulation Statement Misplaced or Missing\"\n matches = find_matches(\"for _item_ in _list_:\\n\"\n \" rainfall_sum = ___\")\n if not matches:\n return explain_r(message, code, label=tldr)\n return False\n\n\ndef wrong_print_9_1():\n \"\"\"\n Returns:\n \"\"\"\n message = ('The output of the total rainfall amount is not in the correct place. The total rainfall should be '\n 'output only once after the total rainfall has been computed.')\n code = \"print_9.1\"\n tldr = \"Print Statement Misplaced or Missing\"\n match = find_match(\"for _item_ in _list_:\\n\"\n \" pass\\n\"\n \"print(_total_)\")\n if not match:\n return explain_r(message, code, label=tldr)\n return False\n\n\n# ##########################9.1 END############################\n\n\n# ##########################9.2 START############################\ndef wrong_list_initialization_9_2():\n message = \"The list of rainfall amounts (rainfall_list) is not initialized properly.\"\n code = \"list_init_9.2\"\n tldr = \"Incorrect List Initialization\"\n matches = find_matches('rainfall_list = weather.get(\"Precipitation\",\"Location\",\"Blacksburg, VA\")')\n if not matches:\n return explain_r(message, code, label=tldr)\n return False\n\n\ndef wrong_accumulator_initialization_9_2():\n message = (\"The variable to hold the total value of the rainfall amounts \"\n \"(rainfall_count) is not initialized properly.\")\n code = \"accu_init_9.2\"\n tldr = \"Incorrect Initialization\"\n if not find_matches(\"rainfall_count = 0\"):\n return explain_r(message, code, label=tldr)\n return False\n\n\ndef wrong_accumulation_9_2():\n message = ('The adding of another day with rainfall to the total '\n 'count of days with rainfall (rainfall_count) is not correct.')\n code = \"accu_9.2\"\n tldr = \"Accumulation Statement Incorrect\"\n matches = find_matches(\"rainfall_count = _item_ + 1\")\n if matches:\n for match in matches:\n _item_ = match[\"_item_\"][0]\n if _item_.id != \"rainfall_count\":\n return explain_r(message, code, label=tldr)\n return False\n\n\ndef wrong_list_initialization_placement_9_2():\n message = (\"The list of rainfall amount (rainfall_list) \"\n \"must be initialized before the iteration that uses this list.\")\n code = \"list_init_place_9.2\"\n tldr = \"Incorrect List Initialization Placement\"\n matches = find_matches(\"rainfall_list = ___\\n\"\n \"for _item_ in _list_:\\n\"\n \" pass\")\n if not matches:\n return explain_r(message, code, label=tldr)\n return False\n\n\ndef wrong_accumulator_initialization_placement_9_2():\n message = (\"The variable for the count of the number of days having rain (rainfall_count
) \"\n \"must be initialized before the iteration which uses this variable.\")\n code = \"accu_init_place_9.2\"\n tldr = \"Accumulator Initialization Misplaced\"\n matches = find_matches(\"rainfall_count = ___\\n\"\n \"for _item_ in _list_:\\n\"\n \" pass\")\n if not matches:\n return explain_r(message, code, label=tldr)\n return False\n\n\ndef wrong_iteration_body_9_2():\n message = (\"The test (if) to determine if a given amount \"\n \"of rainfall is greater than (>) zero is not in the correct place.\")\n code = \"iter_body_9.2\"\n tldr = \"If statement misplaced\"\n matches = find_matches(\"for _item_ in _list_:\\n\"\n \" if __expr__:\\n\"\n \" pass\")\n if matches:\n for match in matches:\n __expr__ = match[\"__expr__\"]\n if __expr__.numeric_logic_check(1, 'var > 0'):\n return False\n return explain_r(message, code, label=tldr)\n\n\ndef wrong_decision_body_9_2():\n message = (\"The increase by 1 in the number of days having rainfall \"\n \"(rainfall_count
) is not in the correct place.\")\n code = \"dec_body_9.2\"\n tldr = \"Accumulation Statement Misplaced\"\n matches = find_matches(\"if __expr__:\\n\"\n \" rainfall_count = rainfall_count + 1\")\n if matches:\n for match in matches:\n __expr__ = match[\"__expr__\"]\n if __expr__.numeric_logic_check(1, 'var > 0'):\n return False\n return explain_r(message, code, label=tldr)\n\n\ndef wrong_print_9_2():\n message = (\"The output of the total number of days with rainfall is not in the correct place. The total number of \"\n \"days should be output only once after the total number of days has been computed.\")\n code = \"print_9.2\"\n tldr = \"Misplaced Print Statement\"\n match = find_match(\"for _item_ in _list_:\\n\"\n \" pass\\n\"\n \"print(_total_)\")\n if not match:\n return explain_r(message, code, label=tldr)\n return False\n\n\n# ##########################9.2 END############################\n\n\n# ##########################9.6 START############################\ndef wrong_comparison_9_6():\n message = \"In this problem you should be finding temperatures above 80 degrees.\"\n code = \"comp_9.6\"\n tldr = \"Incorrect Comparison Statement\"\n matches = find_matches(\"if __comp__:\\n\"\n \" pass\")\n if matches:\n for match in matches:\n __comp__ = match[\"__comp__\"]\n if not __comp__.numeric_logic_check(1, 'var > 80'):\n return explain_r(message, code, label=tldr)\n return False\n\n\n# ##########################9.6 END############################\n\n\n# ##########################10.2 START############################\ndef wrong_conversion_10_2():\n \"\"\"\n '''missing\n for _target_ in ____ :\n _target_ * 0.4\n '''\n Returns:\n \"\"\"\n message = \"The conversion of {0!s}
to inches is either missing, incorrect, or misplaced.\"\n code = \"conv_10.2\"\n tldr = \"Incorrect/Missing Conversion\"\n matches = find_matches(\"for _target_ in ___:\\n\"\n \" __expr__\")\n for match in matches:\n # code version 1 start\n _target_ = match[\"_target_\"][0]\n __expr__ = match[\"__expr__\"]\n matches02 = __expr__.find_matches(\"_target_*0.04\".format(_target_.id))\n if matches02:\n return False\n return explain_r(message.format(_target_.id), code, label=tldr)\n return False\n\n\n# ##########################10.2 END############################\n\n\n# ##########################10.3 START############################\ndef wrong_filter_condition_10_3():\n message = \"The condition used to filter the year when artists died is not correct.\"\n code = \"filt_10.3\"\n tldr = \"Incorrect Condition\"\n matches = find_matches(\"if __expr__:\\n\"\n \" pass\")\n if matches:\n for match in matches:\n __expr__ = match[\"__expr__\"]\n if __expr__.numeric_logic_check(1, \"var > 0\") or __expr__.numeric_logic_check(1, \"var != 0\"):\n return False\n return explain_r(message, code, label=tldr)\n return False\n\n\n# ##########################10.3 END############################\n\n\n# ##########################10.4 START############################\ndef wrong_and_filter_condition_10_4():\n message = (\"The condition used to filter the temperatures \"\n \"into the specified range of temperatures is not correct.\")\n code = \"filt_and_10.4\"\n tldr = \"Incorrect Condition Statement\"\n matches = find_matches(\"for _temp_ in _list_:\\n\"\n \" if __expr__:\\n\"\n \" pass\")\n if matches:\n for match in matches:\n _temp_ = match[\"_temp_\"][0]\n __expr__ = match[\"__expr__\"]\n if (__expr__.has(_temp_.astNode) and\n not __expr__.numeric_logic_check(1, \"32 <= temp <= 50\")):\n return explain_r(message, code, label=tldr)\n return False\n\n\ndef wrong_nested_filter_condition_10_4():\n message = (\"The decisions used to filter the temperatures into \"\n \"the specified range of temperatures is not correct.\")\n code = \"nest_filt_10.4\"\n tldr = \"Incorrect Set of Decisions\"\n matches = find_matches(\"for _temp_ in _list_:\\n\"\n \" if __cond1__:\\n\"\n \" if __cond2__:\\n\"\n \" pass\")\n if matches:\n for match in matches:\n _temp_ = match[\"_temp_\"][0].astNode\n __cond1__ = match[\"__cond1__\"]\n __cond2__ = match[\"__cond2__\"]\n if not (__cond1__.has(_temp_) and __cond2__.has(_temp_) and (\n __cond1__.numeric_logic_check(1, \"32 <= temp\") and __cond2__.numeric_logic_check(1, \"temp <= 50\") or\n __cond2__.numeric_logic_check(1, \"32 <= temp\") and\n __cond1__.numeric_logic_check(1, \"temp <= 50\"))):\n return explain_r(message, code, label=tldr)\n return False\n\n\n# ##########################10.4 END############################\n\n\n# ########################10.5 START###############################\ndef wrong_conversion_problem_10_5():\n message = \"The conversion from kilometers to miles is not correct.\"\n code = \"conv_10.5\"\n tldr = \"Incorrect Conversion\"\n matches = find_matches(\"for _item_ in ___:\\n\"\n \" __expr__\")\n if matches:\n for match in matches:\n _item_ = match[\"_item_\"][0]\n __expr__ = match[\"__expr__\"]\n matches02 = __expr__.find_matches(\"_item_*0.62\")\n if matches02:\n return False\n return explain_r(message, code, label=tldr)\n return False\n\n\ndef wrong_filter_problem_atl1_10_5():\n \"\"\"\n find pattern where expression is equal to _item_*0.62 and\n where the condition is not equivalent to _expr_ > 10\n Returns:\n \"\"\"\n message = \"You are not correctly 
filtering out values from the list.\"\n code = \"filt_alt1_10.5\"\n tldr = \"Incorrect Filter Statement\"\n matches = find_matches(\"for _item_ in ___:\\n\"\n \" if __cond__:\\n\"\n \" _list_.append(__expr__)\")\n if matches:\n for match in matches:\n _item_ = match[\"_item_\"][0].astNode\n __cond__ = match[\"__cond__\"]\n __expr__ = match[\"__expr__\"]\n # matches02 = __expr__.find_matches(\"{0!s}*0.62\".format(_item_.id))\n matches02 = __expr__.find_matches(\"_item_*0.62\")\n if matches02:\n for match02 in matches02:\n if (__cond__.has(_item_) and\n not __cond__.numeric_logic_check(0.1, \"item > 16.1290322580645\")):\n return explain_r(message, code, label=tldr)\n return False\n\n\ndef wrong_filter_problem_atl2_10_5():\n message = \"You are not correctly filtering out values from the list.\"\n code = \"filt_alt2_10.5\"\n tldr = \"Incorrect Filter Statement\"\n matches = find_matches(\"for _item_ in ___:\\n\"\n \" _miles_ = __expr__\\n\"\n \" if __cond__:\\n\"\n \" _list_.append(_miles_)\")\n if matches:\n for match in matches:\n __expr__ = match[\"__expr__\"]\n __cond__ = match[\"__cond__\"]\n _item_ = match[\"_item_\"][0].astNode\n _miles_ = match[\"_miles_\"][0].astNode\n matches02 = __expr__.find_matches(\"_item_*0.62\")\n for _ in matches02:\n if not (__cond__.has(_miles_) and\n __cond__.numeric_logic_check(1, \"_item_ > 10\")):\n return explain_r(message, code, label=tldr)\n return False\n\n\ndef wrong_append_problem_atl1_10_5():\n message = \"You are not appending the correct values.
(app_alt1_10.5\"\n code = \"app_alt1_10.5\"\n tldr = \"Incorrect Value Appended\"\n matches = find_matches(\"for _item_ in ___:\\n\"\n \" if __cond__:\\n\"\n \" _list_.append(__expr__)\")\n if matches:\n for match in matches:\n _item_ = match[\"_item_\"][0].astNode\n __cond__ = match[\"__cond__\"]\n __expr__ = match[\"__expr__\"]\n if (__cond__.numeric_logic_check(0.1, \"item > 16.1290322580645\") and\n __cond__.has(_item_)):\n # new_code = \"{}*0.62\".format(_item_.id)\n new_code = \"_item_*0.62\"\n matches02 = __expr__.find_matches(new_code)\n if not matches02:\n return explain_r(message, code, label=tldr)\n return False\n\n\ndef wrong_append_problem_atl2_10_5():\n message = \"You are not appending the correct values.\"\n code = \"app_alt2_10.5\"\n tldr = \"Incorrect Value Appended\"\n matches = find_matches(\"for _item_ in ___:\\n\"\n \" _miles_ = _item_ * 0.62\\n\"\n \" if __cond__:\\n\"\n \" _list_.append(_var_)\")\n for match in matches:\n __cond__ = match[\"__cond__\"]\n _miles_ = match[\"_miles_\"][0]\n _var_ = match[\"_var_\"][0]\n if __cond__.has(_miles_) and __cond__.numeric_logic_check(1, \"_miles_ > 10\"):\n if _var_.id != _miles_.id:\n return explain_r(message, code, label=tldr)\n return False\n\n\n# ########################10.5 END###############################\ndef wrong_debug_10_6():\n \"\"\"\n Should be on change feedback as opposed to on-run\n Returns:\n \"\"\"\n message = \"This is not one of the two changes needed. Undo the change and try again.\"\n code = \"debug_10.6\"\n tldr = \"At least one unnecessary change\"\n matches = find_matches('quakes = earthquakes.get(\"depth\",\"(None)\",\"\")\\n'\n 'quakes_in_miles = []\\n'\n 'for quake in _list1_:\\n'\n ' _list2_.append(quake * 0.62)\\n'\n 'plt.hist(quakes_in_miles)\\n'\n 'plt.xlabel(\"Depth in Miles\")\\n'\n 'plt.ylabel(\"Number of Earthquakes\")\\n'\n 'plt.title(\"Distribution of Depth in Miles of Earthquakes\")\\n'\n 'plt.show()')\n for match in matches:\n name1 = match[\"_list1_\"][0].ast_node.id\n name2 = match[\"_list2_\"][0].ast_node.id\n master_list = [\"quake\", \"quakes\", \"quakes_in_miles\"]\n if (name1 in master_list and name2 in master_list and\n name1 != \"quakes_in_miles\" and name2 != \"quakes\" and\n (name1 != \"quake\" or name2 != \"quake\")):\n return False\n return explain_r(message, code, label=tldr)\n\n\ndef wrong_debug_10_7():\n message = \"This is not the change needed. Undo the change and try again.\"\n code = \"debug_10.7\"\n tldr = \"At least one unnecessary change\"\n match = find_match(\"filtered_sentence_counts = []\\n\"\n \"book_sentence_counts = classics.get('sentences','(None)','')\\n\"\n \"for book in book_sentence_counts:\\n\"\n \" if book >= 5000:\\n\"\n \" filtered_sentence_counts.append(book)\\n\"\n \"plt.hist(filtered_sentence_counts)\\n\"\n \"plt.title('Distribution of Number of Sentences in Long Books')\\n\"\n \"plt.xlabel('Number of Sentences')\\n\"\n \"plt.ylabel('Number of Long Books')\\n\"\n \"plt.show()\\n\")\n\n if not match:\n return explain_r(message, code, label=tldr)\n return False\n\n\n# ########################.....###############################\ndef wrong_initialization_in_iteration():\n message = (\"You only need to initialize {0!s}
once. \"\n \"Remember that statements in an iteration block happens multiple times\")\n code = \"wrong_init_in_iter\"\n tldr = \"Initialization in Iteration\"\n matches = find_matches(\"for ___ in ___:\\n\"\n \" __expr__\")\n if matches:\n for match in matches:\n __expr__ = match[\"__expr__\"]\n submatches = __expr__.find_matches(\"_assign_ = __expr__\", )\n if submatches:\n for submatch in submatches:\n __expr__sub = submatch[\"__expr__\"]\n _assign_ = submatch[\"_assign_\"][0].astNode\n if len(__expr__sub.find_all(\"Name\")) == 0:\n return explain_r(message.format(_assign_.id), code, label=tldr)\n return False\n\n\ndef wrong_duplicate_var_in_add():\n message = \"You are adding the same variable twice; you need two different variables in your addition.\"\n code = \"dup_var\"\n tldr = \"Duplicate Division\"\n match = find_match(\"_item_ + _item_\")\n if match:\n return explain_r(message, code, label=tldr)\n return False\n\n\n# ########################PLOTTING###############################\ndef plot_group_error(output=None):\n if output is None:\n output = get_output()\n if len(output) > 1:\n explain_r('You should only be printing/plotting one thing!', \"print_one\", \"Multiple Calls to print or plot\")\n return True\n elif len(output) == 0:\n explain_r('The algorithm is plotting an empty list. Check your logic.', 'blank_plot', \"Blank Plot\")\n return True\n elif not isinstance(output[0], list):\n explain('You should be plotting, not printing!', 'printing', \"Printing instead of Plotting\")\n return True\n elif len(output[0]) != 1:\n explain('You should only be plotting one thing!', 'one_plot', \"Too Many Plots\")\n return True\n\n\ndef all_labels_present(): # TODO: make sure it's before the show, maybe check for default values\n \"\"\"\n plt.title(\"Distribution of Number of Sentences in Long Books\")\n plt.xlabel(\"Number of Sentences\")\n plt.ylabel(\"Number of Long Books\")\n plt.show()\n Returns:\n \"\"\"\n message = \"Make sure you supply labels to all your axes and provide a title and then call show\"\n code = \"labels_present\"\n tldr = \"Missing Label(s)\"\n match = find_match(\"plt.title(___)\\nplt.show()\")\n match02 = find_match(\"plt.xlabel(___)\\nplt.show()\")\n match03 = find_match(\"plt.ylabel(___)\\nplt.show()\")\n\n if (not match) or (not match02) or (not match03):\n return gently_r(message, code, label=tldr)\n return False\n\n\ndef hard_code_8_5(): # TODO: This one's weird\n message = \"Use iteration to calculate the sum.\"\n code = \"hard_code_8.5\"\n tldr = \"Hard Coded Answer\"\n match = find_matches(\"print(__num__)\")\n if match:\n for m in match:\n __num__ = m[\"__num__\"]\n if len(__num__.find_all(\"Num\")) > 0:\n return explain_r(message, code, label=tldr)\n return False\n","src/lib/pedal/mistakes/__init__.py":"","src/lib/pedal/plugins/blockpy_compatibility.py":"class GracefulExit(Exception):\n pass\n\n\nclass StudentData:\n def __init__(self):\n pass\n\n def get_names_by_type(self, type, exclude_builtins):\n pass\n\n def get_values_by_type(self, type, exclude_builtins):\n pass\n\n\nstudent = StudentData()\n\n\ndef get_output():\n pass\n\n\ndef reset_output():\n pass\n\n\ndef queue_input(*inputs):\n pass\n\n\ndef get_program():\n pass\n\n\ndef parse_program():\n pass\n\n\ndef had_execution_time_error():\n pass\n\n\ndef limit_execution_time():\n pass\n\n\ndef unlimit_execution_time():\n pass\n\n\ndef analyze_program():\n pass\n\n\ndef def_use_error(AstNode):\n pass\n\n\nclass CorruptedAstNode:\n def __init__(self):\n pass\n\n\ndef find_match(instructor_code):\n 
pass\n\n\ndef find_matches(instructor_code):\n pass\n\n\nclass ASTMap:\n def __init__(self, JSAstMap):\n pass\n\n def get_std_name(self, id):\n pass\n\n def get_std_exp(self, id):\n pass\n\n\nclass AstNode:\n def __init__(self, id):\n pass\n\n def __eq__(self, other):\n pass\n\n def numeric_logic_check(self, mag, expr):\n pass\n\n def __str__(self):\n pass\n\n def __repr__(self):\n pass\n\n def __getattr__(self, key):\n pass\n\n def has(self, AstNode):\n pass\n\n def find_all(self, type):\n pass\n","src/lib/pedal/plugins/cmd_line.py":"from pedal.cait.cait_api import *\nfrom pedal.report import MAIN_REPORT\nfrom pedal.source import set_source\nfrom pedal.tifa import tifa_analysis\nfrom pedal.sandbox.compatibility import *\nimport importlib.util\nimport numpy as np\nimport pandas as pd\n\nimport sys\nimport os\nimport re\n\n\ndef setup(student_code, input_vals):\n \"\"\"\n Clears MAIN_REPORT, sets source, and runs TIFA\n Args:\n student_code: String of student code\n input_vals: list of inputs to be queued.\n Returns:\n None\n \"\"\"\n MAIN_REPORT.clear()\n set_source(student_code)\n tifa_analysis()\n if len(input_vals) != 0:\n queue_input(*input_vals)\n run_student(True)\n return get_sandbox()\n\n\ndef process(file, module, ins_code, report):\n student_code1 = file.read()\n setup(student_code1, inputs) # setup returns a sandbox object\n module.loader.exec_module(ins_code)\n feedback = report.feedback\n return feedback\n\n\np2Flag = True\nsecrets = False\nassignment_id = -1\nif __name__ == \"__main__\":\n # processing args\n feedback_code = sys.argv[1]\n code_dir = sys.argv[2]\n flag = sys.argv[3]\n if flag == \"-p2\":\n p2Flag = True\n inputs = sys.argv[4:]\n elif flag == \"-secrets\":\n p2Flag = True\n secrets = True\n inputs = sys.argv[4:]\n else:\n inputs = sys.argv[3:]\nelse:\n # feedback_suffix = \"prequiz.py\"\n # assignment_id = 409\n feedback_suffix = \"postquiz1.py\"\n assignment_id = 410 # Pass Count = 1\n # feedback_suffix = \"postquiz2-1.py\"\n # assignment_id = 411 # Pass Count = 2\n # feedback_suffix = \"postquiz2-2.py\"\n # assignment_id = 412\n # feedback_code = (\"C:/Users/User/Documents/Luke_Stuff/Research/ComputationalThinking/DictionaryUnit/test_cmd/\"\n # \"ins_script.py\")\n feedback_code = (\"C:/Users/User/Documents/Luke_Stuff/Research/ComputationalThinking/\"\n \"DictionaryUnit/ID/Assessments/\")\n feedback_code += feedback_suffix\n\n code_dir = (\"C:/Users/User/Documents/Luke_Stuff/Research/ComputationalThinking/ResearchData/\"\n \"ComputationalThinking/Tests/results/\")\n code_dir += \"Spring2019/DictionaryData/cs1014_spr2019_log-v1/\"\n # code_dir += \"Fall2018/DictionaryData/exported-f18/\"\n p2Flag = True\n secrets = True\n inputs = []\n\n# Grabbing instructor feedback code\nins_mod = re.match(\"(?:.*/)(.*).py\", feedback_code)[1]\nmy_spec = importlib.util.spec_from_file_location(ins_mod, feedback_code)\nfoo = importlib.util.module_from_spec(my_spec)\n\n# preparing to process\n\n\n# Grabbing student files\nif p2Flag:\n student_feedback = []\n pass_count = 0\n main_table = \"MainTable\"\n if secrets:\n main_table += \"-2\"\n main_table += \".csv\"\n df = pd.read_csv(code_dir + main_table)\n code_states = code_dir + \"CodeStates/\"\n for index, row in df.iterrows():\n scan = True\n if assignment_id >= 0:\n if secrets:\n if int(row[\"AssignmentID\"]) != assignment_id:\n scan = False\n if scan:\n code_f = code_states + str(int(row['CodeStateID'])) + \"/__main__.py\"\n # check assignment and find corresponding answer key in DictionaryUnit/ID/Assessments/...\n with 
open(code_f) as code:\n feedback_result = process(code, my_spec, foo, MAIN_REPORT)\n # df.at[index, 'InterventionMessage'] = feedback_result\n student_feedback.append(feedback_result)\n score = 0.0\n if not feedback_result:\n score = 1.0\n pass_count += 1\n df.at[index, 'Score'] = score\n df.to_csv(code_dir + \"processed.csv\", index=False)\nelse:\n student_feedback = []\n print(os.getcwd())\n student_files_base = os.listdir(code_dir)\n student_files = []\n for code_name in student_files_base:\n student_files.append(code_dir + code_name)\n for code_name in student_files:\n with open(code_name) as code_f:\n student_feedback.append(process(code_f, my_spec, foo, MAIN_REPORT))\n if __name__ == \"__main__\":\n print(student_feedback)\n","src/lib/pedal/plugins/grade_magic.py":"# Built-in imports\nimport json\nimport requests\n\n# IPython imports\nfrom IPython.core.magic import (Magics, magics_class, line_magic, cell_magic, line_cell_magic)\nfrom IPython.display import Javascript, display\nfrom IPython.utils.io import capture_output, CapturedIO\n\n# Logging imports\nimport os\nimport sys\nfrom warnings import warn\n# from traitlets import Bool\nimport time\n\n# TODO: Opportunity here to add in requests-cache. This would allow us to avoid\n# the repeated trip. However, you'll need to handle expiring the cache in a\n# smart way. One option is to write a command line script to just wipe as\n# necessary. Simply deleting the cache file would be pretty easy, assuming it\n# installs per user.\n\n# This really should come in as a configuration setting somewhere.\nBLOCKPY_URL = 'https://think.cs.vt.edu/blockpy/load_assignment_give_feedback'\n\n\ndef get_response_error(response):\n \"\"\"\n Transform a Response object into a friendlier string.\n\n Args:\n response (requests.Response): A Requests reponse object to parse for\n some kind of error.\n Returns:\n str: A string representation of the URL response.\n \"\"\"\n return \"{} {}: {}\".format(response.status_code, response.reason,\n response.text)\n\n\ndef download_on_run(assignment_id):\n \"\"\"\n Download the on_run (give_feedback) code to use to test their solution.\n\n Args:\n assignment_id (int OR str): The ID of the assignment to get the\n on_run code for.\n Returns:\n bool: Whether or not the request was successful.\n str: If unsuccesful, a message to display to the user. 
Otherwise, it'll\n be the on_run code.\n \"\"\"\n data = {'assignment_id': assignment_id}\n try:\n response = requests.get(BLOCKPY_URL, data=data)\n except Exception as error:\n return False, str(error)\n try:\n result = response.json()\n except ValueError:\n # Failed to parse the JSON; perhaps it was some text data?\n return False, get_response_error(response)\n if result['success']:\n return True, result['give_feedback']\n else:\n return False, result['message']\n\n\nPEDAL_PIPELINE = '''\nfrom pedal.report import *\nfrom pedal.report.imperative import *\nclear_report()\nfrom pedal.source import set_source\nset_source({student_code})\nfrom pedal.tifa import tifa_analysis\ntifa_analysis(True)\nfrom pedal.sandbox.compatibility import *\nqueue_input({inputs})\nrun_student(True)\nstudent = get_sandbox()\nfrom pedal.cait.cait_api import parse_program\n{on_run}\nfrom pedal.resolvers import simple\nSUCCESS, SCORE, CATEGORY, LABEL, MESSAGE, DATA, HIDE = simple.resolve()\n'''\n\n\ndef blockpy_grade(assignment_id, student_code, inputs):\n \"\"\"\n Helper function to capture the request from the server.\n\n Args:\n assignment_id (int): The assignment ID to look up and use the on_run\n code for.\n student_code (str): The code that was written by the student.\n\n inputs (str): The inputs to queue into the assignment\n\n Returns:\n str: The HTML formatted feedback for the student.\n \"\"\"\n successful_download, on_run = download_on_run(assignment_id)\n # If it failed, let's display some information about why.\n if not successful_download:\n return on_run\n return execute_on_run_code(on_run, student_code, inputs)\n\n\ndef execute_on_run_code(on_run, student_code, inputs):\n \"\"\"\n Actually execute the on_run code for the given student code.\n \"\"\"\n # Even though the student code is a string, we need to escape it to prevent\n # any weirdness from being in the instructor code.\n escaped_student_code = json.dumps(student_code)\n instructor_code = PEDAL_PIPELINE.format(on_run=on_run,\n student_code=escaped_student_code,\n # inputs=','.join(inputs))\n inputs=inputs)\n # Execute the instructor code in a new environment\n global_variables = globals()\n compiled_code = compile(instructor_code, 'instructor_code.py', 'exec')\n exec(compiled_code, global_variables)\n category = global_variables['CATEGORY']\n label = global_variables['LABEL']\n message = global_variables['MESSAGE']\n # In some cases, we might want to override how the text is rendered.\n if category.lower() == 'instructor' and label.lower() == 'explain':\n category = \"Instructor Feedback\"\n label = ''\n # Return the result as HTML\n return '''{}: {}
{}'''.format(category, label, message)\n\n\n# The following string literals are used to create the JavaScript code that\n# creates the Python code that will execute the instructor's feedback code\n# using the student's Python code.\n\n# Extract out the student code, embed the result\nEXTRACT_STUDENT_CODE = r\"\"\"\n// Convert Notebook cells to a string of Python code\nvar makePython = function(cell) {\n if (cell.cell_type == \"code\") {\n // Code is embedded unchanged, unless it is magic\n var source = cell.get_text();\n if (source.startsWith('%')) {\n // Skip magic\n return '';\n } else {\n return source;\n }\n } else if (cell.cell_type == \"markdown\" ||\n cell.cell_type == \"raw\") {\n // Markdown and text is wrapped in a string.\n var escaped_text = cell.get_text().replace(/'''/g, \"\\\\'\\\\'\\\\'\");\n return \"'''\"+escaped_text+\"'''\";\n }\n}\nvar isUsable = function(cell) {\n return cell.cell_type == \"code\" ||\n cell.cell_type == \"markdown\" ||\n cell.cell_type == \"raw\";\n}\nvar cells = Jupyter.notebook.get_cells();\nvar source_code = cells.filter(isUsable).map(makePython).join(\"\\n\");\nsource_code = JSON.stringify(source_code);\nconsole.log(source_code);\n// Start constructing the feedback code (which will be Python).\nvar on_run_code = [];\non_run_code.push(\"student_code=\"+source_code);\n\"\"\"\n\n# Retrieve the last cell, and also recolor it a little for style\nANIMATE_LAST_CELL = r\"\"\"\n// While we are accessing the server, recolor the last cell a little.\nvar last = null;\nif (cells.length > 0) {\n last = cells[cells.length-1];\n $(last.element).animate({\"background-color\": \"#E0E6FF\"}, 1000);\n}\n\"\"\"\n\n# If the %grade magic is used, we run the code directly.\nLOCAL_GRADE = r'''\non_run_code.push(\"from pedal.plugins.grade_magic import execute_on_run_code\");\non_run_code.push('print(execute_on_run_code({on_run_code}, student_code, {inputs}))');\n'''\n\n# If the %grade_blockpy magic is used, we need to get the on_run from blockpy.\nBLOCKPY_GRADE = r'''\non_run_code.push(\"from pedal.plugins.grade_magic import blockpy_grade\");\non_run_code.push('import json')\non_run_code.push('inputs = {inputs}')\nconsole.log('inputs = {inputs}')\non_run_code.push(\"print(blockpy_grade({assignment}, student_code, inputs))\");\n'''\n\n# This chunk actually performs the on_run code execution using the kernel.\nEXECUTE_CODE = r'''\non_run_code = on_run_code.join(\"\\n\");\nconsole.log(on_run_code);\nvar kernel = IPython.notebook.kernel;\nif (kernel !== null) {\n var t = kernel.execute(on_run_code, { 'iopub' : {'output' : function(x) {\n if (x.msg_type == \"error\") {\n // If this was an error, show the traceback properly.\n if (last !== null) {\n last.output_area.append_error(x.content);\n console.error(x);\n } else {\n console.error(\"Could not append to final cell.\", x);\n }\n } else if (!x.content.data && x.content.text) {\n // If it was valid data, we show it as HTML.\n console.log(x);\n element.html(x.content.text.replace(/\\n/g, \"
\"));\n } else {\n // I'm not sure what it is - better dump it on the console.\n console.log(x);\n }\n // Decolor the last cell if it was there.\n if (last !== null) {\n last = cells[cells.length-1];\n $(last.element).animate({\"background-color\": \"white\"}, 1000);\n }\n }}});\n}'''\n\n\n@magics_class\nclass GradeMagic(Magics):\n \"\"\"\n This class holds the magic for the %grade and %grade_blockpy\n \"\"\"\n\n @line_magic\n def grade_logstart(self, line=\"\"):\n # ######Logging\n ts = time.time()\n logger = self.shell.logger # logging\n old_logfile = self.shell.logfile # logging\n directory = os.path.expanduser(\"log_folder{}~/\".format(line))\n logfname = os.path.expanduser(\"log_folder{}~/log_{}.py~\".format(line, ts))\n self.shell.logfile = logfname\n loghead = u'# IPython log file\\n\\n'\n try:\n os.makedirs(directory, exist_ok=True)\n logger.logstart(logfname, loghead, 'rotate', True, True,\n True)\n except BaseException:\n self.shell.logfile = old_logfile\n warn(\"Couldn't start log: %s\" % sys.exc_info()[1])\n self.shell.run_code(\"input = __builtins__.input\")\n self.shell.run_code(\"print = __builtins__.print\")\n self.shell.run_code(\"sum = __builtins__.sum\")\n self.shell.run_code(\"len = __builtins__.len\")\n\n @line_magic\n def grade_logstop(self, line=\"\"):\n self.shell.logger.logstop()\n\n def logging(self):\n # ######Logging\n ts = time.time()\n logger = self.shell.logger # logging\n old_logfile = self.shell.logfile # logging\n logfname = os.path.expanduser(\"log_folder~/log_{}.py~\".format(ts))\n self.shell.logfile = logfname\n loghead = u'# IPython log file\\n\\n'\n try:\n logger.logstart(logfname, loghead, 'rotate', False, True,\n True)\n except BaseException:\n self.shell.logfile = old_logfile\n warn(\"Couldn't start log: %s\" % sys.exc_info()[1])\n logger.timestamp = False\n input_hist = self.shell.history_manager.input_hist_raw\n logger.log_write(u'\\n'.join(input_hist[1:]))\n logger.log_write(u'\\n')\n logger.timestamp = True\n self.shell.logger.logstop()\n # ######Logging\n\n # noinspection PyMethodMayBeStatic\n def grade_parser(self, line, cell=None):\n if ',' in line:\n if cell is None:\n assignment, line = line.split(\",\", maxsplit=1)\n else:\n assignment = None\n inputs = json.dumps(line.split(\",\"))\n inputs = \"\\\\'\" + inputs[1:len(inputs) - 1] + \"\\\\'\"\n else:\n if cell is None:\n assignment, inputs = line, \"\"\n else:\n inputs = line\n assignment = \"\"\n inputs = json.dumps(inputs)\n return {\"inputs\": inputs, \"assignment\": assignment}\n\n # noinspection PyMethodMayBeStatic\n def unified_helper(self, local_code, **kwargs):\n code = EXTRACT_STUDENT_CODE\n code += ANIMATE_LAST_CELL\n code += local_code.format(**kwargs)\n code += EXECUTE_CODE\n return code\n\n @cell_magic\n def grade(self, line=\"\", cell=\"\"):\n dump = self.grade_parser(line, cell)\n code = self.unified_helper(LOCAL_GRADE, on_run_code=\"INSTRUCTOR_CODE\", inputs=dump['inputs'])\n cell = cell.replace(\"\\\\\", \"\\\\\\\\\")\n cell = cell.replace(\"\\n\", \"\\\\n\")\n cell = cell.replace(\"'\", \"\\\\'\")\n cell = cell.replace('\"', '\\\\\"')\n # Runs this code in the kernel as python code\n # Can also run compiled code\n self.shell.run_code(\"INSTRUCTOR_CODE = \" + '\"' + cell + '\"')\n # TODO: This was the easier way for me to get this to work\n # This might be worth using in more depth to have less translation\n # to and from javascript. 
See usage_examples\n return display(Javascript(code))\n\n @line_cell_magic\n def usage_examples(self, line=\"\", cell=\"print('running cell')\\nprint('running cell2')\"):\n # Runs code in the kernel's context\n self.shell.run_code(\"print('fun')\")\n\n # Runs code in kernel's context using compiled code\n sample = compile(cell, \"usage_examples.py\", \"exec\")\n self.shell.run_code(sample)\n\n # runs javascript code\n self.shell.run_cell_magic(\"javascript\", \"\", \"console.log('I do JAVASCRIPT');\\n\")\n # Maybe can use javascript execution to pass things around...not sure though...can't get it to work\n # You can pass values, but it doesn't seem to work unless you run it again.\n # https://michhar.github.io/javascript-and-python-have-a-party/\n\n self.shell.run_cell_magic(\n \"javascript\", \"\",\n # js_code = Javascript(\n \"\"\"var callbacks = { iopub : { output: function(out_data){ console.log(out_data) } } };\\n\"\"\"\n \"\"\"var code = \"fun = 12\";\\n\"\"\"\n \"\"\"IPython.notebook.kernel.execute(code);\\n\"\"\")\n # handle = display(js_code, display_id=\"usage_examples\")\n # handle.update(handle)\n self.shell.run_cell_magic(\"javascript\", \"\", \"console.log('I do JAVASCRIPT TOO!!');\\n\")\n # captures standard output, standard error, etc. and stops or not stops it\n # class IPython.utils.capture.capture_output(stdout=True, stderr=True, display=True)\n # Note that Tracebacks aren't put in standard error?\n with capture_output(True, False, False) as captured:\n print(dir(self))\n self.shell.run_code(\"print(fun)\")\n sys.stderr.write(\"spam\\n\")\n print(\"I captured stdout\")\n print(captured.stdout)\n print(\"I captured stderr\")\n print(captured.stderr)\n\n @line_magic\n def grade_blockpy(self, line=\"\"):\n dump = self.grade_parser(line)\n code = self.unified_helper(BLOCKPY_GRADE, assignment=dump[\"assignment\"], inputs=dump[\"inputs\"])\n return display(Javascript(code))\n\n\ndef load_ipython_extension(ipython):\n \"\"\"\n Register this plugin with Jupyter Notebooks. 
Although it is allegedly\n necessary in order to make this a plugin, we do not actually use it.\n \"\"\"\n ipython.register_magics(GradeMagic)\n\n\n\"\"\"\nDEPRECATED: The following lines of code do not seem to be necessary to\n register this plugin with Jupyter.\ndef _jupyter_server_extension_paths():\n return [{\n \"module\": \"pedal.plugins.grade_magic\"\n }]\n\n# jupyter serverextension enable --py pedal.plugins.grade_magic\ndef load_jupyter_server_extension(nbapp):\n from IPython import get_ipython\n get_ipython().register_magics(GradeMagic)\n\"\"\"\n","src/lib/pedal/plugins/test_reference_solution.py":"'''\nTool for running a Grading script through a series of student reference\nsolutions.\n\npython -m pedal.plugins.test_reference_solution \n'''\n\n# Runner\nfrom pedal.report.imperative import clear_report, MAIN_REPORT\nfrom pedal.cait import parse_program\nimport sys\nimport os\nfrom io import StringIO\nfrom contextlib import redirect_stdout\nimport unittest\nfrom unittest.mock import patch, mock_open\nimport argparse\n\n# Arguments\nDEFAULT_REFERENCE_SOLUTIONS_DIR = \"reference_solutions/\"\n\n\nclass TestReferenceSolutions(unittest.TestCase):\n maxDiff = None\n\n\ndef substitute_args(arg, student_path, seed):\n if arg == \"$_STUDENT_MAIN\":\n return student_path\n elif arg == \"$_STUDENT_NAME\":\n return seed\n return arg\n\n\ndef add_test(class_, name, python_file,\n expected_output_path, expected_output,\n grader_code, grader_path, grader_args, student_path):\n seed = find_seed(python_file)\n grader_args = [substitute_args(arg, student_path, seed) for arg in grader_args]\n def _inner_test(self):\n captured_output = StringIO()\n with redirect_stdout(captured_output):\n # TODO: mock_open will only work if we are not anticipating\n # the student or instructor to open files...\n with patch('builtins.open', mock_open(read_data=python_file),\n create=True):\n with patch.object(sys, 'argv', grader_args):\n clear_report()\n grader_exec = compile(grader_code, grader_path, 'exec')\n exec(grader_exec, globals())\n #print(repr(MAIN_REPORT.feedback[0].mistake['error']))\n actual_output = captured_output.getvalue()\n if expected_output is None:\n print(\"File not found:\", expected_output_path)\n with open(expected_output_path, 'w') as out:\n out.write(actual_output)\n print(\"\\tCreated missing file with current output\")\n else:\n self.assertEqual(actual_output, expected_output)\n setattr(class_, 'test_' + name, _inner_test)\n\ndef find_seed(python_code):\n try:\n ast = parse_program(python_code)\n for assign in ast.find_all(\"Assign\"):\n if assign.targets[0].ast_name != \"Name\":\n continue\n if assign.targets[0].id == \"__STUDENT_SEED__\":\n if assign.value.ast_name == \"Str\":\n return assign.value.s\n elif assign.value.ast_name == \"Num\":\n return assign.value.n\n elif assign.value.ast_name == \"List\":\n return [e.n for e in assign.value.elts]\n except SyntaxError:\n return 0\n return 0\n\n# Load reference solutions\ndef add_all_tests(grader_path, reference_solutions_dir, grader_args, limit):\n # Load grader file\n with open(grader_path, 'r') as grader_file:\n grader_code = grader_file.read()\n for filename in os.listdir(reference_solutions_dir):\n if limit is not None and limit != filename:\n continue\n path = os.path.join(reference_solutions_dir, filename)\n if path.endswith(\".py\"):\n text_path = path[:-2] + \"txt\"\n with open(path, 'r') as python_file:\n python = python_file.read()\n if os.path.exists(text_path):\n with open(text_path, 'r') as output_file:\n output = 
output_file.read()\n else:\n output = None\n add_test(TestReferenceSolutions, filename[:-3], python,\n text_path, output, \n grader_code, grader_path, grader_args, path)\n\n\ndef run_tests():\n unittest.main(argv=['first-arg-is-ignored'])\n\n\nif __name__ == \"__main__\":\n parser = argparse.ArgumentParser(description='Run instructor grading script on a collection of reference solutions')\n parser.add_argument('grader', help='The path to the instructor grading script.')\n parser.add_argument('--path', '-p',\n help='The path to the student reference files. If not given, assumed to be in the same folder '\n 'as the instructor grading script.',\n default=DEFAULT_REFERENCE_SOLUTIONS_DIR)\n parser.add_argument('--args', '-a',\n help='Pass in arguments that the grading script will use. '\n 'Variable substitutions include \"$_STUDENT_MAIN\".',\n default='test_reference_solution.py,$_STUDENT_MAIN,$_STUDENT_NAME')\n parser.add_argument('--limit', '-l', help='Limit to a specific file.', default=None)\n args = parser.parse_args()\n \n # Turn the reference solutions path into an absolute filename\n if os.path.isabs(args.path):\n reference_solutions_path = args.path\n else:\n reference_solutions_path = os.path.join(os.path.dirname(args.grader), args.path)\n \n # If no reference solutions folder, let's make it\n if not os.path.exists(reference_solutions_path):\n os.mkdir(reference_solutions_path)\n \n # Fix up the passed in args\n grader_args = args.args.split(\",\")\n \n # Check that we actually have some files to try out\n if not os.listdir(reference_solutions_path):\n print(\"No reference solutions found\")\n else:\n add_all_tests(args.grader, reference_solutions_path, grader_args, args.limit)\n run_tests()\n","src/lib/pedal/plugins/vpl.py":"from pedal.plugins.vpl_unittest import UnitTestedAssignment\n\n\"\"\"\nSome kind of function to break up the sections\n\"\"\"\nimport re\nimport sys\nfrom html.parser import HTMLParser\n\nfrom pedal.report import MAIN_REPORT\nfrom pedal import source\nfrom pedal.resolvers import sectional\nfrom pedal.cait.cait_api import expire_cait_cache\n\n\nclass VPLStyler(HTMLParser):\n HEADERS = (\"h1\", \"h2\", \"h3\", \"h4\", \"h5\")\n\n def __init__(self):\n super().__init__()\n self.reset()\n self.fed = []\n self.inside_pre = False\n\n def convert(self, html):\n self.feed(html)\n return self.get_data()\n\n @property\n def text(self):\n return ''.join(self.fed)\n\n def get_data(self):\n return self.text\n\n def force_new_line(self):\n if self.text and self.text[-1] not in (\"\\n\", \"\\r\"):\n self.fed.append(\"\\n\")\n\n def handle_starttag(self, tag, attrs):\n if tag in self.HEADERS:\n self.force_new_line()\n self.fed.append(\"-\")\n elif tag in (\"pre\",):\n self.force_new_line()\n self.fed.append(\">\")\n self.inside_pre = True\n\n def handle_data(self, data):\n if self.inside_pre:\n # Need to prepend \">\" to the start of new lines.\n self.fed.append(data.replace(\"\\n\", \"\\n>\"))\n else:\n self.fed.append(data)\n\n def handle_endtag(self, tag):\n if tag in self.HEADERS:\n self.fed.append(\"\")\n elif tag in (\"pre\",):\n self.fed.append(\"\")\n self.inside_pre = False\n\n\ndef strip_tags(html):\n return VPLStyler().convert(html)\n\n\ndef set_maximum_score(number, cap=True, report=None):\n if report is None:\n report = MAIN_REPORT\n report['vpl']['score_maximum'] = number\n report['vpl']['score_cap'] = cap\n\n\ndef resolve(report=None, custom_success_message=None):\n if report is None:\n report = MAIN_REPORT\n print(\"<|--\")\n success, score, hc, messages_by_group 
= sectional.resolve(report)\n last_group = 0\n for group, messages in sorted(messages_by_group.items()):\n if group != last_group:\n for intermediate_section in range(last_group, group, 2):\n print(\"-\" + report['source']['sections'][1 + intermediate_section])\n printed_first_bad = False\n for message in messages:\n if message['priority'] in ('positive', 'instructions'):\n print(strip_tags(message['message']))\n elif not printed_first_bad:\n print(strip_tags(message['message']))\n printed_first_bad = True\n last_group = group\n print(\"-Overall\")\n if success:\n if custom_success_message is None:\n print(\"Complete! Great job!\")\n else:\n print(custom_success_message)\n else:\n print(\"Incomplete\")\n print(\"--|>\")\n print(\"Grade :=>>\", round(score))\n\n\nclass SectionalAssignment:\n max_points = 1\n sections = None\n\n def __init__(self, filename=None, max_points=None, report=None):\n self.report = MAIN_REPORT if report is None else report\n find_file(filename if filename else self.filename,\n sections=True, report=report)\n set_maximum_score(self.max_points\n if max_points is None else max_points)\n source.check_section_exists(self.sections)\n\n def pre_test(self):\n source.next_section()\n verified = source.verify_section()\n expire_cait_cache()\n return verified\n\n def post_test(self):\n return True\n\n def resolve(self):\n checks = ((self.pre_test() and\n getattr(self, attr)() and\n self.post_test())\n for attr in dir(self)\n if attr.startswith('test_') and\n callable(getattr(self, attr)))\n if all(checks):\n self.report.set_success()\n resolve(report=self.report)\n\n\nfrom pedal.plugins.vpl_unittest import UnitTestedAssignment\n\n\ndef unittest_resolver(phases, report=None, custom_success_message=None):\n success = True\n for title, phase in phases:\n outcome = phase()._run_all_tests()\n if not outcome:\n break\n success = success and outcome\n resolve(custom_success_message=custom_success_message)\n","src/lib/pedal/plugins/vpl_safe_runner.py":"from pedal import run\nfrom pedal import set_source_file\nimport sys\n\nif __name__ == \"__main__\":\n set_source_file(sys.argv[1] if len(sys.argv) > 1 else 'main.py')\n student = run(context=False)\n print(student.raw_output)\n if student.exception:\n print(student.exception_formatted, file=sys.stderr)\n","src/lib/pedal/plugins/vpl_unittest.py":"from unittest.util import safe_repr\nfrom pedal import gently\nfrom pedal.assertions.assertions import _normalize_string\n\n\nclass UnitTestedAssignment:\n DELTA = .001\n\n class AssertionException(Exception):\n def __init__(self, message):\n self.message = message\n\n def __init__(self):\n pass\n\n def setUp(self):\n pass\n\n def tearDown(self):\n pass\n\n def _run_all_tests(self):\n methods = [func for func in dir(self)\n if callable(getattr(self, func)) and\n func.startswith('test_')]\n all_passed = True\n for method in methods:\n self.setUp()\n try:\n getattr(self, method)()\n except UnitTestedAssignment.AssertionException as e:\n gently(e.message)\n all_passed = False\n self.tearDown()\n return all_passed\n\n def assertSimilarStrings(self, first, second, msg):\n if _normalize_string(first) != _normalize_string(second):\n return self.assertEqual(first, second, msg, exact=True)\n\n def assertNotSimilarStrings(self, first, second, msg):\n if _normalize_string(first) == _normalize_string(second):\n return self.assertEqual(first, second, msg, exact=True)\n\n def assertLessEqual(self, val1, val2, msg=None):\n if not (val1 <= val2):\n self.fail(msg, \"{} is not less than or equal to 
{}\".format(safe_repr(val1), safe_repr(val2)))\n\n def assertGreaterEqual(self, val1, val2, msg=None):\n if not (val1 >= val2):\n self.fail(msg, \"{} is not greater than or equal to {}\".format(safe_repr(val1), safe_repr(val2)))\n\n def assertNotEqual(self, val1, val2, msg=None, exact=False):\n if val1 != val2:\n return\n if not exact and isinstance(val1, str) and isinstance(val2, str):\n self.assertNotSimilarStrings(val1, val2, msg)\n elif (not exact and isinstance(val1, (int, float)) and\n isinstance(val2, (int, float))):\n if abs(val2 - val1) > UnitTestedAssignment.DELTA:\n return\n standardMsg = \"{} == {}\".format(safe_repr(val1), safe_repr(val2))\n self.fail(msg, standardMsg)\n\n def assertEqual(self, val1, val2, msg=None, exact=False):\n if val1 == val2:\n return\n if not exact and isinstance(val1, str) and isinstance(val2, str):\n self.assertSimilarStrings(val1, val2, msg)\n elif (not exact and isinstance(val1, (int, float)) and\n isinstance(val2, (int, float))):\n if abs(val2 - val1) <= UnitTestedAssignment.DELTA:\n return\n standardMsg = \"{} != {}\".format(safe_repr(val1), safe_repr(val2))\n self.fail(msg, standardMsg)\n\n def assertIn(self, member, container, msg=None):\n if member not in container:\n standardMsg = \"{} not found in {}\".format(safe_repr(member),\n safe_repr(container))\n self.fail(msg, standardMsg)\n\n def assertNotIn(self, member, container, msg=None):\n if member in container:\n standardMsg = \"{} found in {}\".format(safe_repr(member),\n safe_repr(container))\n self.fail(msg, standardMsg)\n\n def assertTrue(self, value, msg=None):\n if not value:\n self.fail(msg, \"{} is not true\".format(value))\n\n def assertFalse(self, value, msg=None):\n if value:\n self.fail(msg, \"{} is not false\".format(value))\n\n def assertSandbox(self, sandbox, msg=None):\n if sandbox.exception is not None:\n self.fail(msg, sandbox.format_exception())\n\n def assertIsInstance(self, value, parent, msg=None):\n if not isinstance(value, parent):\n self.fail(msg, \"{} is not an instance of {}\".format(safe_repr(value), safe_repr(parent)))\n\n def assertHasAttr(self, object, attr, msg=None):\n if not hasattr(object, attr):\n self.fail(msg, \"{} does not have an attribute named {}\".format(safe_repr(object), safe_repr(attr)))\n\n def fail(self, message, standardMsg):\n if message is None:\n message = standardMsg\n raise UnitTestedAssignment.AssertionException(message)\n","src/lib/pedal/plugins/__init__.py":"\n'''\ndef default_pipeline(tifa=False, cait=True, sandbox=True):\n next_section()\n results = []\n if tifa:\n results.append(tifa_analysis())\n if cait:\n results.append(parse_program())\n if sandbox:\n results.append(execute())\n return tuple(results)\n'''\n","src/lib/pedal/questions/graders.py":"from pedal.questions import QuestionGrader\n\nfrom pedal import run, compliment, explain, gently\nfrom pedal.report.imperative import MAIN_REPORT\nfrom pedal.assertions.assertions import *\nfrom pedal.toolkit.functions import *\n\nclass FunctionGrader(QuestionGrader):\n MAX_POINTS = 10\n DEFINITION_POINTS = 3\n COMPONENTS_POINTS = 1\n MAX_COMPONENTS_POINTS = 2\n UNIT_TEST_TYPE_POINTS = None\n UNIT_TEST_VALUE_POINTS = None\n UNIT_TEST_TOTAL_POINTS = 5\n UNIT_TEST_TYPE_RATIO = .5\n UNIT_TEST_COMPLETION_POINTS = 2\n \n def __init__(self, function_name, signature, tests):\n super().__init__()\n self.function_name = function_name\n self.signature = signature\n self.tests = tests\n self.points = 0\n \n def _test(self, question):\n defined = self.grade_definition(question)\n \n if not defined:\n 
return self.report_status(question)\n \n self.grade_components(question)\n \n passed_tests = self.grade_unit_tests(question)\n if not passed_tests:\n return self.report_status(question)\n \n self.report_success(question)\n \n def report_status(self, question):\n pass\n \n def report_success(self, question):\n question.answer()\n \n def grade_definition(self, question):\n self.student = run(report_exceptions=True, context=False)\n self.student.report_exceptions_mode=False\n \n self.definition = match_signature_muted(self.function_name, *self.signature)\n if not assertGenerally(self.definition):\n gently(\"Function not defined\")\n return False\n \n if self.student.exception:\n return False\n if not assertHasFunction(self.student, self.function_name):\n gently(\"Function defined incorrectly\")\n return False\n \n self.points += self.DEFINITION_POINTS\n return True\n \n def grade_components(self, question):\n self.component_points = 0\n components = self._get_functions_with_filter('grade_component_')\n for component in components:\n component(question)\n self.component_points = min(self.component_points, self.MAX_COMPONENTS_POINTS)\n self.points += self.component_points\n \n def assertEqual(self, *parameters):\n return assertEqual(*parameters)\n \n def grade_unit_tests(self, question):\n all_good = True\n if self.UNIT_TEST_TOTAL_POINTS is None:\n TYPE_POINT_ADD = self.UNIT_TEST_TYPE_POINTS\n VALUE_POINT_ADD = self.UNIT_TEST_VALUE_POINTS\n else:\n ratio = self.UNIT_TEST_TYPE_RATIO\n TYPE_POINT_ADD = (self.UNIT_TEST_TOTAL_POINTS/len(self.tests) * (ratio))\n VALUE_POINT_ADD = (self.UNIT_TEST_TOTAL_POINTS/len(self.tests) * (1-ratio))\n for arguments, expected in self.tests:\n #import sys\n #print(repr(arguments), file=sys.stderr)\n result = self.student.call(self.function_name, *arguments, context=False)\n #print(repr(self.student.exception), file=sys.stderr)\n if self.student.exception:\n all_good = False\n continue\n if assertIsInstance(result, type(expected)):\n self.points += TYPE_POINT_ADD\n else:\n all_good = False\n continue\n if self.assertEqual(result, expected):\n self.points += VALUE_POINT_ADD\n else:\n all_good = False\n if all_good:\n self.points += self.UNIT_TEST_COMPLETION_POINTS\n else:\n gently(\"Failing unit tests\")\n return all_good\n","src/lib/pedal/questions/setup.py":"from pedal.report.imperative import MAIN_REPORT\n\nimport hashlib\n\ndef _name_hash(name):\n return hashlib.md5(name.encode('utf8')).digest()[0]\n\ndef _setup_questions(report):\n '''\n Initialize any necessary fields for the report's question tool.\n \n Args:\n report (Report): The report object to store data and feedback in.\n '''\n if 'questions' not in report:\n report['questions'] = {\n 'seed': 0\n }\n\ndef set_seed(seed_value, report=None):\n '''\n Sets the seed that will be used in selecting questions.\n \n Args:\n seed_value (int or str or iterable[int]): The value to use when\n selecting questions, deterministically. If int, the same index\n will be used for all questions. If an iterable of ints, each\n one will serve as the index for the corresponding problem (throws\n an exception if the iterable isn't long enough). If a string,\n it will be hashed to a value (the hash is deterministic across\n platforms) that will be modulo'd to be in the right range for the\n pool. Presently, hashing generates values from [0, 256) so you\n need to limit your questions to 256.\n report (Report): The report object to store data and feedback in. 
If\n left None, defaults to the global MAIN_REPORT.\n '''\n if report is None:\n report = MAIN_REPORT\n report['questions']['seed'] = seed_value\n","src/lib/pedal/questions/__init__.py":"\"\"\"\nA tool for providing dynamic questions to learners.\n\"\"\"\n\nNAME = 'Questions'\nSHORT_DESCRIPTION = \"Provides dynamic questions to learners\"\nDESCRIPTION = '''\n'''\nREQUIRES = []\nOPTIONALS = []\nCATEGORY = 'Instructions'\n\n__all__ = ['NAME', 'DESCRIPTION', 'SHORT_DESCRIPTION', 'REQUIRES', 'OPTIONALS',\n 'Question', 'Pool', 'set_seed']\n\nfrom pedal.report.imperative import MAIN_REPORT\nfrom pedal.questions.setup import _setup_questions, set_seed, _name_hash\n\nclass QuestionGrader:\n def _get_functions_with_filter(self, filter='grade_'):\n return [getattr(self, method_name) for method_name in dir(self)\n if method_name.startswith(filter) and\n callable(getattr(self, method_name))]\n def _test(self, question):\n methods = self._get_functions_with_filter()\n for method in methods:\n method(question)\n\nclass Question:\n def __init__(self, name, instructions, tests, seed=None, report=None):\n self.name = name\n self.instructions = instructions\n self.tests = tests\n self.seed = seed\n if report is None:\n report = MAIN_REPORT\n self.report = report\n self.answered = False\n \n def answer(self):\n self.answered = True\n \n def ask(self):\n if isinstance(self.tests, QuestionGrader):\n self.tests._test(self)\n else:\n for test in self.tests:\n test(self)\n if not self.answered:\n self.report.attach('Question', category='Instructions', tool='Questions',\n group=self.report.group,\n priority='instructions',\n hint=self.instructions)\n\nclass Pool:\n _POOL_TRACKER = 0\n def __init__(self, name, choices, seed=None, report=None, position=None):\n self.name = name\n self.choices = choices\n self.seed = seed\n if report is None:\n report = MAIN_REPORT\n self.report = report\n if position is None:\n position = Pool._POOL_TRACKER\n Pool._POOL_TRACKER += 1\n self.position = position\n\n def choose(self, force=None):\n _setup_questions(self.report)\n if force is None:\n if self.seed is None:\n force = self.report['questions']['seed']\n if isinstance(force, str):\n force = _name_hash(force+self.name)\n # Assume iterable; could be check that throws better error\n if not isinstance(force, int):\n force = force[self.position]\n else:\n force = self.seed\n return self.choices[force % len(self.choices)]\n \n @property\n def answered(self):\n for choice in self.choices:\n if choice.answered:\n return True\n return False\n","src/lib/pedal/report/feedback.py":"\"\"\"\nSimple data classes for storing feedback to present to learners.\n\"\"\"\n\n__all__ = ['Feedback']\n\n\nclass Feedback:\n \"\"\"\n A class for storing raw feedback.\n\n Attributes:\n label (str): An internal name for this specific piece of feedback.\n tool (str): An internal name for indicating the tool that created\n this feedback.\n category (str): A human-presentable name showable to the learner.\n More than one Feedback will be in a category, most\n likely.\n priority (str): An indication of how important this feedback is.\n Might be \"high/medium/low\" or the name of a\n category (tool?) to supersede. Exactly how this gets\n used is up to the resolver. A special kind of priority\n is \"positive\" - which indicates that this feedback is\n positive, and the information is good to convey to the\n student.\n group (int or str): The group that this piece of feedback should be\n associated with. 
Some resolvers want to group feedback using this\n identifier.\n result (bool): Whether or not this feedback is associated with the\n learner completing the task (\"Success!\").\n performance (float): A relative amount that this feedback contributes\n to the students' performance (think in terms of\n partial credit, like \"Triggering this feedback\n is worth 20% (.2)\").\n misconceptions (Message): A description of the misconception that\n is believed to be in the student's mind,\n or perhaps the relevant concept from the\n material that should be associated with\n this. (\"Variables must be initialized\n before they are used\").\n mistakes (Message): A description of the error or bug that the\n student has created (\"NameError on line 5: sum\n has not been defined\").\n hints (Message): A suggestion for what the student can do\n (\"Initialize the sum variable on line 2\").\n constraints (Message): A description of the task requirements or\n task type that the student has violated\n (\"You used a for loop, but this question\n expected you to use recursion.\").\n metacognitives (Message): A suggestion for more regulative\n strategies (\"You have been working for\n 5 hours, perhaps it is time to take\n a break?\").\n \"\"\"\n MESSAGE_TYPES = ['hint', 'mistake', 'misconception',\n 'constraint', 'metacognitive']\n\n def __init__(self, label, tool='instructor',\n category='Instructor feedback', priority=None, group=None,\n result=None, performance=None, misconception=None,\n mistake=None, hint=None, constraint=None,\n metacognitive=None):\n # Metadata\n self.label = label\n self.tool = tool\n self.category = category\n self.priority = priority\n self.group = group\n # Data\n self.result = result\n self.performance = performance\n self.misconception = misconception\n self.mistake = mistake\n self.hint = hint\n self.constraint = constraint\n self.metacognitive = metacognitive\n\n def __str__(self):\n return \"\".format(self.label)\n\n def __repr__(self):\n metadata = \"\"\n if self.tool is not None:\n metadata += \", tool=\" + self.tool\n if self.category is not None:\n metadata += \", category=\" + self.category\n if self.priority is not None:\n metadata += \", priority=\" + self.priority\n if self.group is not None:\n metadata += \", group=\" + str(self.group)\n data = \"\"\n return \"Feedback({}{}{})\".format(self.label, metadata, data)\n\n\n\"\"\"\nA Message is one of:\n str\n Dict with a `message` field and any other suitable fields, such as:\n html_message: An HTML message instead of a plaintext message.\n line: The line number to highlight\n error: The error message to render\n\"\"\"\n","src/lib/pedal/report/imperative.py":"\"\"\"\nImperative style commands for constructing feedback in a convenient way.\nUses a global report object (MAIN_REPORT).\n\"\"\"\n\n__all__ = ['set_success', 'compliment', 'give_partial', 'explain', 'explain_r',\n 'gently', 'gently_r', 'hide_correctness', 'suppress', 'log', 'debug',\n 'clear_report', 'get_all_feedback', 'MAIN_REPORT', 'guidance']\n\nfrom pedal.report.report import Report\n\n#: The global Report object. 
Meant to be used as a default singleton\n#: for any tool, so that instructors do not have to create their own Report.\n#: Of course, all APIs are expected to work with a given Report, and only\n#: default to this Report when no others are given.\nMAIN_REPORT = Report()\n\n\ndef set_success():\n \"\"\"\n Creates Successful feedback for the user, indicating that the entire\n assignment is done.\n \"\"\"\n MAIN_REPORT.set_success()\n\n\ndef compliment(message, line=None):\n \"\"\"\n Create a positive feedback for the user, potentially on a specific line of\n code.\n\n Args:\n message (str): The message to display to the user.\n line (int): The relevant line of code to reference.\n \"\"\"\n MAIN_REPORT.compliment(message, line)\n\n\ndef give_partial(value, message=None):\n \"\"\"\n Increases the user's current score by the `value`. Optionally display\n a positive message too.\n\n Args:\n value (number): The number to increase the user's score by.\n message (str): The message to display to the user.\n \"\"\"\n MAIN_REPORT.give_partial(value, message)\n\n\ndef explain(message, priority='medium', line=None, label='explain'):\n MAIN_REPORT.explain(message, priority, line, label=label)\n\n\ndef guidance(message, priority='medium', line=None, label='Guidance'):\n MAIN_REPORT.guidance(message, priority, line, label=label)\n\n\ndef gently(message, line=None, label='explain'):\n MAIN_REPORT.gently(message, line, label=label)\n\n\ndef gently_r(message, code, line=None, label=\"explain\"):\n gently(message + \"
({})\".format(code), line, label=label)\n return message\n\n\ndef explain_r(message, code, priority='medium', line=None, label=\"explain\"):\n explain(message + \"
({})\".format(code), priority, line, label=label)\n return message\n\n\ndef hide_correctness():\n MAIN_REPORT.hide_correctness()\n\n\ndef suppress(category, label=True):\n MAIN_REPORT.suppress(category, label)\n\n\ndef log(message):\n MAIN_REPORT.log(message)\n\n\ndef debug(message):\n MAIN_REPORT.debug(message)\n\n\ndef clear_report():\n MAIN_REPORT.clear()\n\n\ndef get_all_feedback():\n return MAIN_REPORT.feedback\n","src/lib/pedal/report/report.py":"from pedal.report.feedback import Feedback\n\n__all__ = ['Report']\n\n\nclass Report:\n \"\"\"\n A class for storing Feedback generated by Tools, along with any auxiliary\n data that the Tool might want to provide for other tools.\n\n Attributes:\n feedback (list of Feedback): The raw feedback generated for this Report\n so far.\n suppressions (list of tuple(str, str)): The categories and labels that\n have been suppressed so far.\n group (int or str): The label for the current group. Feedback given\n by a Tool will automatically receive the current `group`. This\n is used by the Source tool, for example, in order to group feedback\n by sections.\n group_names (dict[group:str]): A printable, student-facing name for the\n group. When a group needs to be rendered out to the user, this\n will override whatever label was going to be presented instead.\n group_order (sequence or callable or None): The mechanism to use to\n order groups. If a sequence, the order will be inferred based on\n the order of elements in the sequence. If a callable, the callable\n will be used as a key function for `sort`. If `None`, then defaults\n to the natural ordering of the groups. Defaults to `None`.\n hooks (dict[str: list[callable]): A dictionary mapping events to\n a list of callable functions. Tools can register functions on\n hooks to have them executed when the event is triggered by another\n tool. For example, the Assertions tool has hooks on the Source tool\n to trigger assertion resolutions before advancing to next sections.\n _results (dict of str => any): Maps tool names to their data. 
The\n namespace for a tool can be used to\n store whatever they want, but will\n probably be in a dictionary itself.\n \"\"\"\n group_order = None\n\n def __init__(self):\n \"\"\"\n Creates a new Report instance.\n \"\"\"\n self.clear()\n\n def clear(self):\n self.feedback = []\n self.suppressions = {}\n self._results = {}\n self.group = None\n self.group_names = {}\n self.hooks = {}\n\n def set_success(self, group=None):\n \"\"\"\n Creates Successful feedback for the user, indicating that the entire\n assignment is done.\n \"\"\"\n if group is None:\n group = self.group\n self.feedback.append(Feedback('set_success', priority='positive',\n result=True, group=group))\n\n def give_partial(self, value, message=None, group=None):\n if value is None:\n return False\n if group is None:\n group = self.group\n self.feedback.append(Feedback('give_partial', performance=value,\n priority='positive',\n group=group,\n mistake=message))\n return True\n\n def hide_correctness(self):\n self.suppressions['success'] = []\n\n def explain(self, message, priority='medium', line=None, group=None,\n label='explain'):\n misconception = {'message': message}\n if line is not None:\n misconception['line'] = line\n if group is None:\n group = self.group\n self.attach(label, priority=priority, category='instructor',\n group=group, misconception=misconception)\n\n def gently(self, message, line=None, group=None, label='explain'):\n self.explain(message, priority='student', line=line, group=group,\n label=label)\n\n def guidance(self, message, line=None, group=None, label='guidance'):\n hint = {'message': message}\n if line is not None:\n hint['line'] = line\n if group is None:\n group = self.group\n self.attach(label, priority='instructions', category='instructions', group=group, hint=hint)\n\n def compliment(self, message, line=None, group=None, label='explain'):\n self.explain(message, priority='positive', line=line, group=group,\n label=label)\n\n def attach(self, label, **kwargs):\n self.feedback.append(Feedback(label, **kwargs))\n\n def log(self, message):\n pass\n\n def debug(self, message):\n pass\n\n def suppress(self, category, label=True, where=True):\n \"\"\"\n Args:\n category (str): The category of feedback to suppress.\n label (str): A specific label to match against and suppress.\n where (bool or group): Which group of report to localize the\n suppression to. If instead `True` is passed, the suppression\n occurs in every group globally.\n TODO: Currently, only global suppression is supported.\n \"\"\"\n category = category.lower()\n if isinstance(label, str):\n label = label.lower()\n if category not in self.suppressions:\n self.suppressions[category] = []\n self.suppressions[category].append(label)\n\n def add_hook(self, event, function):\n \"\"\"\n Register the `function` to be executed when the given `event` is\n triggered.\n \n Args:\n event (str): An event name. Multiple functions can be triggered for\n the same `event`. The format is as follows:\n \"pedal.module.function.extra\"\n\n The `\".extra\"` component is optional to add further nuance, but\n the general idea is that you are referring to functions that,\n when called, should trigger other functions to be called first.\n function (callable): A callable function. 
This function should\n accept a keyword parameter named `report`, which will \n \"\"\"\n if event not in self.hooks:\n self.hooks[event] = []\n self.hooks[event].append(function)\n\n def execute_hooks(self, event):\n if event in self.hooks:\n for function in self.hooks[event]:\n function(report=self)\n\n def __getitem__(self, key):\n if key not in self._results:\n self._results[key] = {}\n return self._results[key]\n\n def __setitem__(self, key, value):\n self._results[key] = value\n\n def __contains__(self, key):\n return key in self._results\n","src/lib/pedal/report/__init__.py":"\"\"\"\nThe collection of classes and functions used to store the fundamental Report\nand Feedback objects.\n\"\"\"\n\nfrom pedal.report.report import Report\nfrom pedal.report.feedback import Feedback\nfrom pedal.report.imperative import *\n","src/lib/pedal/resolvers/core.py":"from pedal.report.imperative import MAIN_REPORT\n\n\ndef make_resolver(func, report=None):\n '''\n Decorates the given function as a Resolver. This means that when the\n function is executed, the `\"pedal.resolver.resolve\"` event will be\n triggered.\n \n Args:\n func (callable): The function to decorate.\n report (Report): The Report to trigger the event on. If None, then use\n the `MAIN_REPORT`.\n '''\n if report is None:\n report = MAIN_REPORT\n\n def resolver_wrapper():\n report.execute_hooks(\"pedal.resolvers.resolve\")\n return func()\n\n return resolver_wrapper\n","src/lib/pedal/resolvers/sectional.py":"import sys\n\nfrom pedal.resolvers import simple\nfrom pedal.report import MAIN_REPORT\n\n\ndef resolve(report=None, priority_key=None):\n \"\"\"\n Args:\n report (Report): The report object to resolve down. Defaults to the\n global MAIN_REPORT\n\n Returns\n str: A string of HTML feedback to be delivered\n \"\"\"\n if report is None:\n report = MAIN_REPORT\n if priority_key is None:\n priority_key = simple.by_priority\n # Prepare feedbacks\n feedbacks = report.feedback\n feedbacks.sort(key=lambda f: (f.group or 0, priority_key(f)))\n suppressions = report.suppressions\n # Process\n final_success = False\n final_score = 0\n finals = {}\n found_failure = False\n for feedback in feedbacks:\n group = feedback.group or 0\n category = feedback.category.lower()\n if category in suppressions:\n if True in suppressions[category]:\n continue\n elif feedback.label.lower() in suppressions[category]:\n continue\n success, partial, message, data = simple.parse_feedback(feedback)\n final_success = success or final_success\n final_score += partial\n if message is not None:\n #print(\"RESETING GROUP\", group, message[:20], found_failure, feedback.priority)\n if group not in finals:\n finals[group] = []\n found_failure = False\n if feedback.priority not in ('positive', 'instructions'):\n if found_failure:\n continue\n found_failure = True\n entry = {'label': feedback.label,\n 'message': message,\n 'category': feedback.category,\n 'priority': feedback.priority,\n 'data': data}\n if feedback.priority == 'instructions':\n # Find end of instructions\n index = 0\n for feedback in finals[group]:\n if feedback['priority'] != 'instructions':\n break\n index += 1\n finals[group].insert(index, entry)\n elif feedback.priority != 'positive':\n finals[group].insert(0, entry)\n else:\n finals[group].append(entry)\n #from pprint import pprint\n #pprint(finals)\n final_hide_correctness = suppressions.get('success', False)\n if not finals:\n finals[0] = [{\n 'label': 'No errors',\n 'category': 'Instructor',\n 'data': [],\n 'priority': 'medium',\n 'message': \"No 
errors reported.\"\n }]\n return (final_success, final_score, final_hide_correctness, finals)\n","src/lib/pedal/resolvers/simple.py":"from pedal.report import MAIN_REPORT, Feedback\nfrom pedal.resolvers.core import make_resolver\n\nDEFAULT_CATEGORY_PRIORITY = [\n 'syntax',\n 'mistakes',\n 'instructor',\n 'analyzer',\n 'runtime',\n 'student',\n 'positive',\n 'instructions',\n 'uncategorized',\n]\n\n# For compatibility with the old feedback API\nLEGACY_CATEGORIZATIONS = {\n # 'student': 'runtime',\n 'parser': 'syntax',\n 'verifier': 'syntax',\n 'instructor': 'instructor'\n}\n\n\ndef by_priority(feedback):\n \"\"\"\n Converts a feedback into a numeric representation for sorting.\n\n Args:\n feedback (Feedback): The feedback object to convert\n Returns:\n float: A decimal number representing the feedback's relative priority.\n \"\"\"\n category = 'uncategorized'\n if feedback.category is not None:\n category = feedback.category.lower()\n priority = 'medium'\n if feedback.priority is not None:\n priority = feedback.priority.lower()\n priority = LEGACY_CATEGORIZATIONS.get(priority, priority)\n if category in DEFAULT_CATEGORY_PRIORITY:\n value = DEFAULT_CATEGORY_PRIORITY.index(category)\n else:\n value = len(DEFAULT_CATEGORY_PRIORITY)\n offset = .5\n if priority == 'low':\n offset = .7\n elif priority == 'high':\n offset = .3\n elif priority not in ('low', 'medium', 'high'):\n if priority in DEFAULT_CATEGORY_PRIORITY:\n value = DEFAULT_CATEGORY_PRIORITY.index(priority)\n offset = .1\n return value + offset\n\n\ndef parse_message(component):\n if isinstance(component, str):\n return component\n elif isinstance(component, list):\n return '
\\n'.join(parse_message(c) for c in component)\n elif isinstance(component, dict):\n if \"html\" in component:\n return component[\"html\"]\n elif \"message\" in component:\n return component[\"message\"]\n else:\n raise ValueError(\"Component has no message field: \" + str(component))\n else:\n raise ValueError(\"Invalid component type: \" + str(type(component)))\n\n\ndef parse_data(component):\n if isinstance(component, str):\n return [{'message': component}]\n elif isinstance(component, list):\n return component\n elif isinstance(component, dict):\n return [component]\n\n\ndef parse_feedback(feedback):\n # Default returns\n success = False\n performance = 0\n message = None\n data = []\n # Actual processing\n for feedback_type in Feedback.MESSAGE_TYPES:\n feedback_value = getattr(feedback, feedback_type)\n if feedback_value is not None:\n data.extend(parse_data(feedback_value))\n parsed_message = parse_message(feedback_value)\n if parsed_message is not None:\n message = parsed_message\n if feedback.result is not None:\n success = feedback.result\n if feedback.performance is not None:\n performance = feedback.performance\n return success, performance, message, data\n\n\n@make_resolver\ndef resolve(report=None, priority_key=None):\n \"\"\"\n Args:\n report (Report): The report object to resolve down. Defaults to the\n global MAIN_REPORT\n\n Returns\n str: A string of HTML feedback to be delivered\n \"\"\"\n if report is None:\n report = MAIN_REPORT\n if priority_key is None:\n priority_key = by_priority\n # Prepare feedbacks\n feedbacks = report.feedback\n feedbacks.sort(key=priority_key)\n suppressions = report.suppressions\n # Process\n final_success = False\n final_score = 0\n final_message = None\n final_category = 'Instructor'\n final_label = 'No errors'\n final_data = []\n for feedback in feedbacks:\n category = feedback.category.lower()\n if category in suppressions:\n if True in suppressions[category]:\n continue\n elif feedback.label.lower() in suppressions[category]:\n continue\n success, partial, message, data = parse_feedback(feedback)\n final_success = success or final_success\n final_score += partial\n if (message is not None and\n final_message is None and\n feedback.priority != 'positive'):\n final_message = message\n final_category = feedback.category\n final_label = feedback.label\n final_data = data\n if final_message is None:\n final_message = \"No errors reported.\"\n final_hide_correctness = suppressions.get('success', False)\n if (not final_hide_correctness and final_success and\n final_label == 'No errors' and\n final_category == 'Instructor'):\n final_category = 'Complete'\n final_label = 'Complete'\n final_message = \"Great work!\"\n return (final_success, final_score, final_category,\n final_label, final_message, final_data,\n final_hide_correctness)\n","src/lib/pedal/resolvers/__init__.py":"\"\"\"\n\nResolver Types\n\nDoes there need to be some kind of hook for Tools to wrap up their business?\n\nSimple\n Find the highest priority feedback and show that, along with any positive feedback.\n\nSectional\n Find the highest priority feedback for each section, and show that along with any positive feedback.\n\nFull\n Report all feedback, grouped by tool/category/priority/time.\n\nFull Summary\n Report all feedback but divided into frequencies of labels grouped by tool/category/priority/time.\n\n\"\"\"\n","src/lib/pedal/sandbox/compatibility.py":"import sys\n\nfrom pedal.sandbox.sandbox import Sandbox\nfrom pedal.sandbox.messages import 
EXTENDED_ERROR_EXPLANATION\n\nfrom pedal.report import MAIN_REPORT, Feedback\n\n\ndef _check_sandbox(report):\n if 'run' not in report['sandbox']:\n report['sandbox']['run'] = Sandbox()\n return report['sandbox']['run']\n\n\ndef run_student(raise_exceptions=False, report=None, old_style_messages=False):\n if report is None:\n report = MAIN_REPORT\n sandbox = _check_sandbox(report)\n source_code = report['source']['code']\n filename = report['source']['filename']\n sandbox.run(source_code, as_filename=filename, report_exceptions=not raise_exceptions)\n if raise_exceptions:\n raise_exception(sandbox.exception, sandbox.exception_position,\n report=report, message=None if old_style_messages else sandbox.exception_formatted)\n return sandbox.exception\n\n\ndef queue_input(*inputs, **kwargs):\n if 'report' not in kwargs:\n report = MAIN_REPORT\n else:\n report = kwargs['report']\n sandbox = _check_sandbox(report)\n sandbox.set_input(inputs)\n\n\ndef reset_output(report=None):\n if report is None:\n report = MAIN_REPORT\n sandbox = _check_sandbox(report)\n sandbox.set_output(None)\n\n\ndef get_output(report=None):\n if report is None:\n report = MAIN_REPORT\n sandbox = _check_sandbox(report)\n return sandbox.output\n\n\ndef get_plots(report=None):\n if report is None:\n report = MAIN_REPORT\n sandbox = _check_sandbox(report)\n mock_plt = sandbox.modules['matplotlib.pyplot']\n return mock_plt.plots\n\n\ndef capture_output(function, *args, **kwargs):\n if 'report' in kwargs:\n report = kwargs['report']\n else:\n report = MAIN_REPORT\n sandbox = _check_sandbox(report)\n sandbox.set_output(None)\n sandbox.call(function.__name__, *args)\n return sandbox.output\n\n\ndef get_sandbox(report=None):\n if report is None:\n report = MAIN_REPORT\n sandbox = _check_sandbox(report)\n return sandbox\n\n\ndef raise_exception(exception, position=None, report=None, message=None):\n if report is None:\n report = MAIN_REPORT\n sandbox = _check_sandbox(report)\n if exception is None:\n return\n extended = EXTENDED_ERROR_EXPLANATION.get(exception.__class__, \"\")\n if message is None:\n message = \"{}
\\n{}\".format(str(exception), extended)\n # Skulpt compatible name lookup\n name = str(exception.__class__)[8:-2]\n report.attach(name, category='Runtime', tool='Sandbox',\n mistake={'message': message,\n 'error': exception,\n 'position': position,\n 'traceback': None})\n sandbox.exception = exception\n\n\ndef get_student_data(report=None):\n if report is None:\n report = MAIN_REPORT\n sandbox = _check_sandbox(report)\n return sandbox\n\n\ndef set_sandbox(sandbox, report=None):\n \"\"\"\n Update the sandbox to hold the new sandbox instance. Particularly useful\n for Skulpt, which needs to set the sandbox in an unusual way.\n \"\"\"\n if report is None:\n report = MAIN_REPORT\n report['sandbox']['run'] = sandbox\n return sandbox\n\n\ndef trace_lines(report=None):\n if report is None:\n report = MAIN_REPORT\n sandbox = _check_sandbox(report)\n if sandbox.tracer_style == 'coverage':\n return sandbox.trace.lines - sandbox.trace.missing\n else:\n return []\n","src/lib/pedal/sandbox/exceptions.py":"import traceback\nimport os\nimport sys\n\ntry:\n TimeoutError\nexcept NameError:\n class TimeoutError(Exception):\n pass\n\nclass SandboxException(Exception):\n \"\"\"\n Generic base exception for sandbox errors.\n \"\"\"\n\nclass SandboxStudentCodeException(SandboxException):\n \"\"\"\n Caused by an error in student code\n \"\"\"\n def __init__(self, actual):\n self.actual = actual\n\nclass SandboxPreventModule(Exception):\n \"\"\"\n Caused by student attempting to load a module that they shouldn't.\n \"\"\"\n\n\nclass SandboxHasNoFunction(SandboxException):\n \"\"\"\n Caused by attempting to access a function that the student hasn't created.\n \"\"\"\n\n\nclass SandboxHasNoVariable(SandboxException):\n \"\"\"\n Caused by attempting to access a variable that the student hasn't created.\n \"\"\"\n\n\nclass SandboxNoMoreInputsException(Exception):\n \"\"\"\n Caused by the student calling `input` when the instructor hasn't provided\n enough inputs. Typically, the student has an infinite loop around their\n `input` function.\n \"\"\"\n\n\nBuiltinKeyError = KeyError\n\n\nclass KeyError(BuiltinKeyError):\n \"\"\"\n A version of KeyError that replaces the built-in with one small\n modification: when printing an explanatory message, the message is not\n rendered as a tuple. 
Because that's stupid and the fact that it made it\n into CPython is just rude.\n \n See Also:\n https://github.com/python/cpython/blob/master/Objects/exceptions.c#L1556\n \"\"\"\n __module__ = \"builtins\"\n\n def __init__(self, original, message):\n self.__cause__ = original.__cause__\n self.__traceback__ = original.__traceback__\n self.__context__ = original.__context__\n self.message = message\n\n def __str__(self):\n return self.message\n\n\ndef _add_context_to_error(e, message):\n if isinstance(e, BuiltinKeyError):\n new_args = repr(e.args[0]) + message\n e = KeyError(e, new_args)\n e.args = tuple([new_args])\n elif isinstance(e, OSError):\n # TODO: Investigate OSError, since they have so many args.\n # Might be weird.\n e.args = tuple([e.args[0] + message])\n return e\n elif e.args:\n e.args = tuple([e.args[0] + message])\n return e\nx=sys.stdout\nclass SandboxTraceback:\n \"\"\"\n Class for reformatting tracebacks to have more pertinent information.\n \"\"\"\n\n def __init__(self, exception, exc_info, full_traceback,\n instructor_filename, line_offset, student_filename,\n original_code_lines):\n \"\"\"\n Args:\n exception (Exception): The exception that was raised.\n exc_info (ExcInfo): The result of sys.exc_info() when the exception\n was raised.\n full_traceback (bool): Whether or not to provide the full traceback\n or just the parts relevant to students.\n instructor_filename (str): The name of the instructor file, which\n can be used to avoid reporting instructor code in the\n traceback.\n \"\"\"\n self.line_offset = line_offset\n self.exception = exception\n self.exc_info = exc_info\n self.full_traceback = full_traceback\n self.instructor_filename = instructor_filename\n self.student_filename = student_filename\n self.line_number = traceback.extract_tb(exc_info[2])[-1][1]\n self.original_code_lines = original_code_lines\n\n def _clean_traceback_line(self, line):\n return line.replace(', in ', '', 1)\n\n def format_exception(self, preamble=\"\"):\n if not self.exception:\n return \"\"\n if isinstance(self.exception, TimeoutError):\n return str(self.exception)\n cl, exc, tb = self.exc_info\n while tb and self._is_relevant_tb_level(tb):\n tb = tb.tb_next\n length = self._count_relevant_tb_levels(tb)\n tb_e = traceback.TracebackException(cl, self.exception, tb, limit=length,\n capture_locals=False)\n # print(list(), file=x)\n for frame in tb_e.stack:\n if frame.filename == os.path.basename(self.student_filename):\n frame.lineno += self.line_offset\n frame._line = self.original_code_lines[frame.lineno-1]\n lines = [self._clean_traceback_line(line)\n for line in tb_e.format()]\n lines[0] = \"Traceback:\\n\"\n return preamble + ''.join(lines)\n\n def _count_relevant_tb_levels(self, tb):\n length = 0\n while tb and not self._is_relevant_tb_level(tb):\n length += 1\n tb = tb.tb_next\n return length\n\n def _is_relevant_tb_level(self, tb):\n \"\"\"\n Determines if the give part of the traceback is relevant to the user.\n\n Returns:\n boolean: True means it is NOT relevant\n \"\"\"\n # Are in verbose mode?\n if self.full_traceback:\n return False\n filename, a_, b_, _ = traceback.extract_tb(tb, limit=1)[0]\n # Is the error in the student file?\n if filename == self.student_filename:\n return False\n # Is the error in the instructor file?\n if filename == self.instructor_filename:\n return True\n # Is the error in this test directory?\n current_directory = os.path.dirname(os.path.realpath(__file__))\n if filename.startswith(current_directory):\n return True\n # Is the error related to a 
file in the parent directory?\n parent_directory = os.path.dirname(current_directory)\n # Currently we don't refer to this?\n # Is the error in a local file?\n if filename.startswith('.'):\n return False\n # Is the error in an absolute path?\n if not os.path.isabs(filename):\n return False\n # Okay, it's not a student related file\n return True\n","src/lib/pedal/sandbox/messages.py":"# Skulpt has weird errors, and is missing some errors. Compatibility.\ntry:\n ParseError\nexcept NameError:\n class ParseError(Exception):\n pass\ntry:\n SyntaxError\nexcept NameError:\n class SyntaxError(Exception):\n pass\ntry:\n ReferenceError\nexcept NameError:\n class ReferenceError(Exception):\n pass\ntry:\n EOFError\nexcept NameError:\n class EOFError(Exception):\n pass\ntry:\n MemoryError\nexcept NameError:\n class MemoryError(Exception):\n pass\ntry:\n OSError\nexcept NameError:\n class OSError(Exception):\n pass\ntry:\n TokenError\nexcept NameError:\n class TokenError(Exception):\n pass\ntry:\n TimeLimitError\nexcept NameError:\n class TimeLimitError(Exception):\n pass\n\nEXTENDED_ERROR_EXPLANATION = {\n ParseError: \"A parse error means that Python does not understand the syntax on the line the error message points out. Common examples are forgetting commas beteween arguments or forgetting a :
(colon) on a for statement.
Suggestion: To fix a parse error you just need to look carefully at the line with the error and possibly the line before it. Make sure it conforms to all of Python's rules.\",\n TypeError: \"Type errors most often occur when an expression tries to combine two objects with types that should not be combined. Like using +
to add a number to a list instead of .append
, or dividing a string by a number.
Suggestion: To fix a type error you will most likely need to trace through your code and make sure the variables have the types you expect them to have.\",\n SyntaxError: \"This message indicates that Python can't figure out the syntax of a particular statement. Some examples are assigning to a literal or to a function call.
Suggestion: Check your assignment statements and make sure that the left hand side of the assignment is a variable, not a literal (e.g., 7 or \\\"hello\\\") or a function.\",\n NameError: \"A name error almost always means that you have used a variable before it has a value. Often this may be a simple typo, so check the spelling carefully.
Suggestion: Check the right hand side of assignment statements and your function calls; this is the most likely place for a NameError to be found. It really helps to step through your code, one line at a time, mentally keeping track of your variables.\",\n ValueError: \"A ValueError most often occurs when you pass a parameter to a built-in function, and the function is expecting one type and you pass something different. For instance, if you try to convert a non-numeric string to an int, you will get a ValueError:
int(\\\"Corgi\\\") # ValueError: invalid literal for int() with base 10
Suggestion: The error message gives you a pretty good hint about the name of the function as well as the value that is incorrect. Look at the error message closely and then trace back to the variable containing the problematic value.\",\n AttributeError: \"This happens when you try to do SOMETHING.WHATEVER
and either SOMETHING wasn't declared or WHATEVER isn't an attribute of SOMETHING. This error message is telling you that the object on the left hand side of the dot does not have the attribute or method on the right hand side.
Suggestion: You were probably trying to either get access to some data (weather.get) or append (a_list.append). If it's the first one, you should make sure the module is imported and that you are calling its function correctly. If it's the second one, you should make sure you spelled \"append\" right and that you are using a variable with a list for a value.\",\n TokenError: \"Most of the time this error indicates that you have forgotten a right parenthesis or have forgotten to close a pair of quotes.
Suggestion: Check each line of your program and make sure that your parentheses are balanced.\",\n IndexError: \"This message means that you are trying to index past the end of a string or a list. For example, if your list has 3 things in it and you try to access the item at position 5.
Suggestion: Remember that the first item in a list or string is at index position 0; quite often this message comes about because you are off by one. Remember that in a list of length 3, the last legal index is 2.
favorite_colors = [\\\"red\\\", \\\"blue\\\", \\\"green\\\"]\\nfavorite_colors[2] # prints green favorite_color[3] # raises an IndexError
\",\n ImportError: \"This error message indicates that you are trying to import a module that does not exist, or is not in the same directory as your python script.
Suggestion: One problem may simply be that you have a typo - remember, you must not capitalize the module name. Another common problem is that you have placed the module in a different directory. Finally, if you're using a dataset module, then it might not be imported. Use the \\\"Import Datasets\\\" button below!\",\n ReferenceError: \"This is a really hard error to get, so I'm not entirely sure what you did.
Suggestion: Bring this code to the instructor. \",\n ZeroDivisionError: \"This tells you that you are trying to divide by 0. Typically this is because the value of the variable in the denominator of a division expression has the value 0.
Suggestion: Are you sure you are dividing by the right variable? Are you sure that that variable has the value you expect - is it possible that you counted the number of elements in an empty list, for instance?\",\n IndentationError: \"This error occurs when you have not indented your code properly. This is most likely to happen as part of an if, for, while or def statement.
Suggestion: Check your if, def, for, and while statements to be sure the lines are properly indented beneath them (seriously, this happens ALL the time). Another source of this error comes from copying and pasting code where you have accidentally left some bits of code lying around that don't belong there anymore. Finally, a very sinister but unlikely possibility is that you have some tab characters in your code, which look identical to four spaces. Never, ever use tabs, and carefully check code from the internet to make sure it doesn't have tabs.\",\n EOFError: \"If you are using input() or raw_input() commands, then this error happens when they don't get the right ending.
Suggestion: It's hard to protect against users. However, if you're using input(), you might be able to use raw_input() instead to avoid this problem. \",\n IOError: \"This is a very easy error to get. The most common reason is that you were trying to open a file and it wasn't in the right place.
Suggestion: Make sure that the file is in the right place - print out the file path, and then check that it's definitely on your computer at that location. If you need help doing file processing, you should probably check with an instructor.\",\n KeyError: \"A dictionary has a bunch of keys that you can use to get data. This error is caused by you trying to refer to a key that does not exist.
Suggestion: The most common reason you get this exception is that you have a typo in your dictionary access. Check your spelling. Also double check that the key definitely exists.\",\n MemoryError: \"Somehow, you have run out of memory.
Suggestion: Make sure you are filtering your dataset! Alternatively, bring your code to an instructor.\",\n OSError: \"It's hard to say what an OSError is without deep checking. Many things can cause it.
Suggestion: Bring your code to an instructor. \",\n TimeLimitError: \"A TimeLimit error means that BlockPy wasn't able to process your program fast enough. Typically, this means that you're iterating through too many elements.\"}\n","src/lib/pedal/sandbox/mocked.py":"\"\"\"\nMocked functions that can be used to prevent malicious or accidental `eval`\nbehavior.\n\"\"\"\nimport re\nimport types\n\nfrom pedal.sandbox.exceptions import (SandboxNoMoreInputsException,\n SandboxPreventModule)\n\n\ndef _disabled_compile(source, filename, mode, flags=0, dont_inherit=False):\n \"\"\"\n A version of the built-in `compile` method that fails with a runtime\n error.\n \"\"\"\n raise RuntimeError(\"You are not allowed to call 'compile'.\")\n\n\ndef _disabled_eval(object, globals=globals(), locals=None):\n \"\"\"\n A version of the built-in `eval` method that fails with a runtime\n error.\n \"\"\"\n raise RuntimeError(\"You are not allowed to call 'eval'.\")\n\n\n# -------------------------------------------------------------\n\n\ndef _disabled_exec(object, globals=globals(), locals=None):\n \"\"\"\n A version of the built-in `exec` method that fails with a runtime\n error.\n \"\"\"\n raise RuntimeError(\"You are not allowed to call 'exec'.\")\n\n\n# -------------------------------------------------------------\n\n\ndef _disabled_globals():\n \"\"\"\n A version of the built-in `globals` method that fails with a runtime\n error.\n \"\"\"\n raise RuntimeError(\"You are not allowed to call 'globals'.\")\n \n\nclass FunctionNotAllowed(Exception):\n pass\n \n\ndef disabled_builtin(name):\n def _disabled_version(*args, **kwargs):\n raise FunctionNotAllowed(\"You are not allowed to call '{}'.\".format(name))\n return _disabled_version\n\n\n_OPEN_FORBIDDEN_NAMES = re.compile(r\"(^[./])|(\\.py$)\")\n_OPEN_FORBIDDEN_MODES = re.compile(r\"[wa+]\")\n\n# TODO: Turn this into a function that lets us more elegantly specify valid and\n# invalid filenames/paths\n\n\ndef _restricted_open(name, mode='r', buffering=-1):\n if _OPEN_FORBIDDEN_NAMES.search(name):\n raise RuntimeError(\"The filename you passed to 'open' is restricted.\")\n elif _OPEN_FORBIDDEN_MODES.search(mode):\n raise RuntimeError(\"You are not allowed to 'open' files for writing.\")\n else:\n return _original_builtins['open'](name, mode, buffering)\n\n# TODO: Allow this to be flexible\n\n\ndef _restricted_import(name, globals=None, locals=None, fromlist=(), level=0):\n if name == 'pedal' or name.startswith('pedal.'):\n raise RuntimeError(\"You cannot import pedal!\")\n return _original_builtins['__import__'](name, globals, locals, fromlist, level)\n\n\ntry:\n __builtins__\nexcept NameError:\n _default_builtins = {'globals': globals,\n 'locals': locals,\n 'open': open,\n 'input': input,\n '__import__': __import__}\nelse:\n if isinstance(__builtins__, types.ModuleType):\n _default_builtins = __builtins__.__dict__\n else:\n _default_builtins = __builtins__\n\n_original_builtins = {\n 'globals': _default_builtins['globals'],\n 'locals': _default_builtins['locals'],\n 'open': _default_builtins['open'],\n 'input': _default_builtins['input'],\n 'exec': _default_builtins.get('exec', _disabled_exec),\n 'eval': _default_builtins.get('eval', _disabled_eval),\n 'compile': _default_builtins.get('compile', _disabled_compile),\n '__import__': _default_builtins['__import__']\n}\n\n\ndef make_inputs(input_list, repeat=None):\n \"\"\"\n Helper function for creating mock user input.\n\n Params:\n input_list (list of str): The list of inputs to be returned\n Returns:\n function 
(str=>str): The mock input function that is returned, which\n will return the next element of input_list each\n time it is called.\n \"\"\"\n generator = iter(input_list)\n\n def mock_input(prompt=''):\n print(prompt)\n try:\n return next(generator)\n except StopIteration as SI:\n if repeat is None:\n # TODO: Make this a custom exception\n raise SandboxNoMoreInputsException(\"User had no more input to give.\")\n else:\n return repeat\n\n return mock_input\n\n\n_sys_modules = {}\n\n\ndef _override_builtins(namespace, custom_builtins):\n \"\"\"\n Add the custom builtins to the `namespace` (and the original `__builtins__`)\n suitable for `exec`.\n \"\"\"\n # Obtain the dictionary of built-in methods, which might not exist in\n # some python versions (e.g., Skulpt)\n\n # Create a shallow copy of the dictionary of built-in methods. Then,\n # we'll take specific ones that are unsafe and replace them.\n namespace[\"__builtins__\"] = _default_builtins.copy()\n for name, function in custom_builtins.items():\n namespace[\"__builtins__\"][name] = function\n\n\ndef create_module(module_name):\n submodule_names = module_name.split(\".\")\n modules = {}\n root = types.ModuleType(submodule_names[0])\n modules[submodule_names[0]] = root\n reconstructed_path = submodule_names[0]\n for submodule_name in submodule_names[1:]:\n reconstructed_path += \".\" + submodule_name\n new_submodule = types.ModuleType(reconstructed_path)\n setattr(root, submodule_name, new_submodule)\n modules[reconstructed_path] = new_submodule\n return root, modules\n\n\nclass MockModule:\n def _generate_patches(self):\n return {k: v for k, v in vars(self).items()\n if not k.startswith('_')}\n\n def _add_to_module(self, module):\n for name, value in self._generate_patches().items():\n setattr(module, name, value)\n\n\nclass BlockedModule(MockModule):\n MODULE_NAME = \"this module\"\n\n def _generate_patches(self):\n return {'__getattr__': self.prevent_module}\n\n def prevent_module(self, **kwargs):\n raise SandboxPreventModule(\"You cannot import {module_name} from student code.\".format(\n module_name=self.MODULE_NAME\n ))\n\n\nclass MockPedal(BlockedModule):\n MODULE_NAME = \"pedal\"\n\n\nclass MockPlt(MockModule):\n \"\"\"\n Mock MatPlotLib library that can be used to capture plot data.\n\n Attributes:\n plots (list of dict): The internal list of plot dictionaries.\n \"\"\"\n\n def __init__(self):\n super().__init__()\n self._reset_plots()\n\n def show(self, **kwargs):\n self.plots.append(self.active_plot)\n self._reset_plot()\n\n def unshown_plots(self):\n return self.active_plot['data']\n\n def __repr__(self):\n return repr(self.plots)\n\n def __str__(self):\n return str(self.plots)\n\n def _reset_plots(self):\n self.plots = []\n self._reset_plot()\n\n def _reset_plot(self):\n self.active_plot = {'data': [],\n 'xlabel': None, 'ylabel': None,\n 'title': None, 'legend': False}\n\n def hist(self, data, **kwargs):\n label = kwargs.get('label', None)\n self.active_plot['data'].append({'type': 'hist', 'values': data,\n 'label': label})\n\n def plot(self, xs, ys=None, **kwargs):\n label = kwargs.get('label', None)\n if ys is None:\n self.active_plot['data'].append({'type': 'line',\n 'x': list(range(len(xs))),\n 'y': xs, 'label': label})\n else:\n self.active_plot['data'].append({'type': 'line', 'x': xs,\n 'y': ys, 'label': label})\n\n def scatter(self, xs, ys, **kwargs):\n label = kwargs.get('label', None)\n self.active_plot['data'].append({'type': 'scatter', 'x': xs,\n 'y': ys, 'label': label})\n\n def xlabel(self, label, **kwargs):\n 
self.active_plot['xlabel'] = label\n\n def title(self, label, **kwargs):\n self.active_plot['title'] = label\n\n def suptitle(self, label, **kwargs):\n self.title(label, **kwargs)\n\n def ylabel(self, label, **kwargs):\n self.active_plot['ylabel'] = label\n\n def legend(self, **kwargs):\n self.active_plot['legend'] = True\n\n def _generate_patches(self):\n def dummy(**kwargs):\n pass\n\n return dict(hist=self.hist, plot=self.plot,\n scatter=self.scatter, show=self.show,\n xlabel=self.xlabel, ylabel=self.ylabel,\n title=self.title, legend=self.legend,\n xticks=dummy, yticks=dummy,\n autoscale=dummy, axhline=dummy,\n axhspan=dummy, axvline=dummy,\n axvspan=dummy, clf=dummy,\n cla=dummy, close=dummy,\n figlegend=dummy, figimage=dummy,\n suptitle=self.suptitle, text=dummy,\n tick_params=dummy, ticklabel_format=dummy,\n tight_layout=dummy, xkcd=dummy,\n xlim=dummy, ylim=dummy,\n xscale=dummy, yscale=dummy)\n","src/lib/pedal/sandbox/result.py":"class SandboxResult:\n \"\"\"\n Proxy class for wrapping results from executing student code. Attempts\n to perfectly emulate the underlying data value, so that users will never\n realize they have a proxy. The advantage is that special information is\n available in the corresponding Sandbox about this result that can give\n more context.\n \n Attributes:\n value (any): The actual data stored in this class that we are proxying.\n If the underlying proxy object has a field called `value`, then\n you can use either `_actual_value` to access the proxied object.\n _actual_call_id (int): The call that was used to generate this result.\n _actual_sandbox (Sandbox): The sandbox that was used to generate this\n result. If None, then the sandbox was lost.\n \n \"\"\"\n ASSIGNABLE_ATTRS = ['value', '_actual_call_id', '_actual_sandbox',\n '_clone_this_result']\n\n def __init__(self, value, call_id=None, sandbox=None):\n \"\"\"\n Args:\n value (any): Literally any type of data.\n call_id (int): The unique call ID that generated this result. If\n None, then the SandboxResult was generated by manipulating an earlier\n result.\n TODO: We could actually remember the operations applied to this\n instance and use them to reconstruct the transformations...\n sandbox (Sandbox): The sandbox that was used to generate this\n result. If None, then the sandbox was lost.\n \"\"\"\n self.value = value\n self._actual_call_id = call_id\n self._actual_sandbox = sandbox\n\n def __getattribute__(self, name):\n \"\"\"\n Get the attribute with the given `name`. 
This allows us to pass\n most attributes along to the underlying `value`, while still\n maintaining access to the proxy's attributes.\n \"\"\"\n v = object.__getattribute__(self, \"value\")\n if name == \"__class__\":\n return v.__class__\n elif name == \"__actual_class__\":\n return object.__getattribute__(self, \"__class__\")\n elif name == \"_actual_value\":\n return v\n elif name in SandboxResult.ASSIGNABLE_ATTRS:\n return object.__getattribute__(self, name)\n elif name == \"value\" and not hasattr(v, \"value\"):\n return v\n else:\n return SandboxResult(object.__getattribute__(v, name),\n object.__getattribute__(self, \"_actual_call_id\"),\n object.__getattribute__(self, \"_actual_sandbox\"))\n\n def __setattr__(self, name, value):\n if name in SandboxResult.ASSIGNABLE_ATTRS:\n object.__setattr__(self, name, value)\n else:\n setattr(self.value, name, value)\n\n def __delattr__(self, name):\n if name in SandboxResult.ASSIGNABLE_ATTRS:\n object.__delattr__(self, name, value)\n else:\n delattr(self.value, name, value)\n\n def _clone_this_result(self, new_value):\n \"\"\"\n Create a new SandboxResult based on this current one. Copies over the\n `call_id` and `sandbox`.\n \n Args:\n new_value (any): The new value to be proxying.\n Returns:\n SandboxResult\n \"\"\"\n return SandboxResult(new_value,\n call_id=self._actual_call_id,\n sandbox=self._actual_sandbox)\n\n def __repr__(self):\n \"\"\"\n Returns the representation of the proxied object.\n \n Returns:\n str: The `repr` of the proxied object.\n \"\"\"\n return repr(self.value)\n\n def __str__(self):\n \"\"\"\n Returns the string representation of the proxied object.\n \n Returns:\n str: The `str` of the proxied object.\n \"\"\"\n return str(self.value)\n\n def __bytes__(self):\n return bytes(self.value)\n\n def __format__(self, format_spec):\n return format(self.value, format_spec)\n\n def __call__(self, *args):\n \"\"\"\n Returns the result of calling the proxied object with the args.\n \n Returns:\n SandboxResult: A proxy of the Sandbox object.\n \"\"\"\n return self._clone_this_result(self.value(*args))\n\n def __hash__(self):\n return hash(self.value)\n\n def __bool__(self):\n return bool(self.value)\n\n def __dir__(self):\n return dir(self.value)\n\n def __instancecheck__(self, instance):\n return isinstance(self.value, instance)\n\n def __subclasscheck__(self, subclass):\n return issubclass(self.value, subclass)\n\n def __len__(self):\n '''\n Fun fact: cpython DEMANDS that __len__ return an integer. 
Not something\n that looks like an integer, but a true, honest-to-god integer that\n can fit into a slot.\n https://stackoverflow.com/questions/42521449/how-does-python-ensure-the-return-value-of-len-is-an-integer-when-len-is-cal\n '''\n return len(self.value)\n\n def __getitem__(self, key):\n return self._clone_this_result(self.value[key])\n\n def __setitem__(self, key, value):\n self.value[key] = value\n\n def __delitem__(self, key):\n del self.value[key]\n\n def __missing__(self, key):\n return self.value.__missing__(key)\n\n def __iter__(self):\n return iter(self.value)\n\n def __reversed__(self):\n return reversed(self.value)\n\n def __contains__(self, item):\n return self.value.__contains__(item)\n\n def __eq__(self, other):\n \"\"\"\n Test if the proxied object is equal to the given `other`.\n \n Args:\n other (any): The other object.\n \n Returns:\n bool or any: Returns whatever the proxy object's __eq__ returns.\n \"\"\"\n if isinstance(other, SandboxResult):\n return self.value == other.value\n return self.value == other\n\n def __lt__(self, other):\n if isinstance(other, SandboxResult):\n return self.value < other.value\n return self.value < other\n\n def __le__(self, other):\n if isinstance(other, SandboxResult):\n return self.value <= other.value\n return self.value <= other\n\n def __gt__(self, other):\n if isinstance(other, SandboxResult):\n return self.value > other.value\n return self.value > other\n\n def __ge__(self, other):\n if isinstance(other, SandboxResult):\n return self.value >= other.value\n return self.value >= other\n\n def __ne__(self, other):\n if isinstance(other, SandboxResult):\n return self.value != other.value\n return self.value != other\n\n ## Numeric Operations\n\n def __add__(self, other):\n if isinstance(other, SandboxResult):\n return self._clone_this_result(self.value + other.value)\n return self._clone_this_result(self.value + other)\n\n def __sub__(self, other):\n if isinstance(other, SandboxResult):\n return self._clone_this_result(self.value - other.value)\n return self._clone_this_result(self.value - other)\n\n def __mul__(self, other):\n if isinstance(other, SandboxResult):\n return self._clone_this_result(self.value * other.value)\n return self._clone_this_result(self.value * other)\n\n def __matmul__(self, other):\n if isinstance(other, SandboxResult):\n return self._clone_this_result(self.value.__matmul__(other.value))\n return self._clone_this_result(self.value.__matmul__(other))\n\n def __truediv__(self, other):\n if isinstance(other, SandboxResult):\n return self._clone_this_result(self.value.__truediv__(other.value))\n return self._clone_this_result(self.value.__truediv__(other))\n\n def __floordiv__(self, other):\n if isinstance(other, SandboxResult):\n return self._clone_this_result(self.value.__floordiv__(other.value))\n return self._clone_this_result(self.value.__floordiv__(other))\n\n def __mod__(self, other):\n if isinstance(other, SandboxResult):\n return self._clone_this_result(self.value.__mod__(other.value))\n return self._clone_this_result(self.value.__mod__(other))\n\n def __divmod__(self, other):\n if isinstance(other, SandboxResult):\n return self._clone_this_result(self.value.__divmod__(other.value))\n return self._clone_this_result(self.value.__divmod__(other))\n\n def __pow__(self, other, *modulo):\n if isinstance(other, SandboxResult):\n return self._clone_this_result(self.value.__pow__(other.value, *modulo))\n return self._clone_this_result(self.value.__pow__(other, *modulo))\n\n def __lshift__(self, other):\n if 
isinstance(other, SandboxResult):\n return self._clone_this_result(self.value.__lshift__(other.value))\n return self._clone_this_result(self.value.__lshift__(other))\n\n def __rshift__(self, other):\n if isinstance(other, SandboxResult):\n return self._clone_this_result(self.value.__rshift__(other.value))\n return self._clone_this_result(self.value.__rshift__(other))\n\n def __and__(self, other):\n if isinstance(other, SandboxResult):\n return self._clone_this_result(self.value.__and__(other.value))\n return self._clone_this_result(self.value.__and__(other))\n\n def __xor__(self, other):\n if isinstance(other, SandboxResult):\n return self._clone_this_result(self.value.__xor__(other.value))\n return self._clone_this_result(self.value.__xor__(other))\n\n def __or__(self, other):\n if isinstance(other, SandboxResult):\n return self._clone_this_result(self.value.__or__(other.value))\n return self._clone_this_result(self.value.__or__(other))\n\n def __radd__(self, other):\n if isinstance(self.value, str):\n return self._clone_this_result(self.value.__add__(other))\n return self._clone_this_result(self.value.__radd__(other))\n\n def __rsub__(self, other):\n return self._clone_this_result(self.value.__rsub__(other))\n\n def __rmul__(self, other):\n return self._clone_this_result(self.value.__rmul__(other))\n\n def __rmatmul__(self, other):\n return self._clone_this_result(self.value.__rmatmul__(other))\n\n def __rtruediv__(self, other):\n return self._clone_this_result(self.value.__rtruediv__(other))\n\n def __rfloordiv__(self, other):\n return self._clone_this_result(self.value.__rfloordiv__(other))\n\n def __rmod__(self, other):\n return self._clone_this_result(self.value.__rmod__(other))\n\n def __rdivmod__(self, other):\n return self._clone_this_result(self.value.__rdivmod__(other))\n\n def __rpow__(self, other):\n return self._clone_this_result(self.value.__rpow__(other))\n\n def __rlshift__(self, other):\n return self._clone_this_result(self.value.__rlshift__(other))\n\n def __rand__(self, other):\n return self._clone_this_result(self.value.__rand__(other))\n\n def __rxor__(self, other):\n return self._clone_this_result(self.value.__rxor__(other))\n\n def __ror__(self, other):\n return self._clone_this_result(self.value.__ror__(other))\n\n ## TODO: __iadd__ and other in-place assignment operators?\n\n def __neg__(self):\n return self._clone_this_result(self.value.__neg__())\n\n def __pos__(self):\n return self._clone_this_result(self.value.__pos__())\n\n def __abs__(self):\n return self._clone_this_result(self.value.__abs__())\n\n def __invert__(self):\n return self._clone_this_result(self.value.__invert__())\n\n def __complex__(self):\n return self._clone_this_result(self.value.__complex__())\n\n def __int__(self):\n return self._clone_this_result(self.value.__int__())\n\n def __float__(self):\n return self._clone_this_result(self.value.__float__())\n\n def __round__(self, *ndigits):\n return self._clone_this_result(self.value.__round__(*ndigits))\n\n def __trunc__(self):\n return self._clone_this_result(self.value.__trunc__())\n\n def __floor__(self):\n return self._clone_this_result(self.value.__floor__())\n\n def __ceil__(self):\n return self._clone_this_result(self.value.__ceil__())\n\n def __enter__(self):\n return self.value.__enter__()\n\n def __exit__(self, exc_type, exc_value, traceback):\n return self.value.__exit__(exc_type, exc_value, traceback)\n\n def __await__(self):\n return self.value.__await__()\n\n def __aiter__(self):\n return self.value.__aiter__()\n\n def 
__anext__(self):\n return self.value.__anext__()\n\n def __aenter__(self):\n return self.value.__aenter__()\n\n def __aexit__(self, exc_type, exc_value, traceback):\n return self.value.__aexit__(exc_type, exc_value, traceback)\n","src/lib/pedal/sandbox/sandbox.py":"from pprint import pprint\nimport ast\nimport re\nimport sys\nimport io\nimport os\nimport string\nfrom unittest.mock import patch\n\nfrom pedal.report import MAIN_REPORT\nfrom pedal.sandbox import mocked\nfrom pedal.sandbox.exceptions import (SandboxTraceback, SandboxHasNoFunction,\n SandboxStudentCodeException,\n SandboxHasNoVariable, _add_context_to_error)\nfrom pedal.sandbox.timeout import timeout\nfrom pedal.sandbox.messages import EXTENDED_ERROR_EXPLANATION\nfrom pedal.sandbox.result import SandboxResult\nfrom pedal.sandbox.tracer import (SandboxCallTracer, SandboxCoverageTracer,\n SandboxBasicTracer)\n\n\ndef _dict_extends(d1, d2):\n \"\"\"\n Helper function to create a new dictionary with the contents of the two\n given dictionaries. Does not modify either dictionary, and the values are\n copied shallowly. If there are repeats, the second dictionary wins ties.\n\n The function is written to ensure Skulpt compatibility.\n\n Args:\n d1 (dict): The first dictionary\n d2 (dict): The second dictionary\n Returns:\n dict: The new dictionary\n \"\"\"\n d3 = {}\n for key, value in d1.items():\n d3[key] = value\n for key, value in d2.items():\n d3[key] = value\n return d3\n\n\nclass SandboxVariable:\n def __init__(self, name, value):\n self.name = name\n self.value = value\n\n\nclass DataSandbox:\n \"\"\"\n Simplistic Mixin class that contains the functions for accessing a\n self-contained student data namespace.\n \"\"\"\n\n def __init__(self):\n super().__init__()\n self.data = {}\n\n def get_names_by_type(self, type, exclude_builtins=True):\n result = []\n for name, value in self.data.items():\n if isinstance(value, type):\n if exclude_builtins and name.startswith('__'):\n continue\n result.append(name)\n return result\n\n def get_values_by_type(self, type, exclude_builtins=True):\n names = self.get_names_by_type(type, exclude_builtins)\n return [self.data[name] for name in names]\n\n def get_variables_by_type(self, type, exclude_builtins=True):\n names = self.get_names_by_type(type, exclude_builtins)\n return [(name, self.data[name]) for name in names]\n\n @property\n def functions(self):\n \"\"\"\n Retrieve a list of all the callable names in the students' namespace.\n In other words, get a list of all the functions the student defined.\n\n Returns:\n list of callables\n \"\"\"\n return {k: v for k, v in self.data.items() if callable(v)}\n\n @property\n def var(self):\n return {k: SandboxVariable(k, v) for k, v in self.data.items()}\n\n def __repr__(self):\n return \"\"\n\nclass Sandbox(DataSandbox):\n \"\"\"\n\n The Sandbox is a container that can safely execute student code and store\n the result.\n\n Attributes:\n data: The namespace produced by the students' code. This is basically\n a dictionary mapping valid python names to their values.\n raw_output (str): The exact literal results of all the `print` calls\n made so far, including the \"\\n\" characters.\n output (list of str): The current lines of output, broken up by\n distinct print calls (not \"\\n\" characters). Note that this will\n not have any \"\\n\" characters unless you explicitly printed them.\n output_contexts (dict[str:list[str]]): The output for each call context.\n call_id (int): The current call_id of the most recent call. 
Is\n initially 0, indicating the original sandbox creation.\n modules: A dictionary of the mocked modules (accessible by their\n imported names).\n context: A list of strings representing the code previously run through\n this sandbox via .call.\n contextualize (bool): Whether or not to contextualize stack frames.\n \"\"\"\n\n CONTEXT_MESSAGE = (\n \"\\n\\nThe error above occurred when I ran:
\\n{context}
\"\n )\n FILE_CONTEXT_MESSAGE = (\n \"\\n\\nThe error above occurred when I ran your file: {filename}\"\n )\n INPUT_CONTEXT_MESSAGE = (\n \"And entered the inputs:\\n```\\n{inputs}\\n```\"\n )\n TRACER_STYLES = {\n 'coverage': SandboxCoverageTracer,\n 'calls': SandboxCallTracer,\n 'none': SandboxBasicTracer,\n }\n\n def __init__(self, initial_data=None,\n initial_raw_output=None,\n initial_exception=None,\n modules=None, full_traceback=False,\n tracer_style='none',\n threaded=False, report=None,\n context=None, result_proxy=SandboxResult,\n instructor_filename=\"instructor_tests.py\",\n allowed_functions=None):\n \"\"\"\n Args:\n initial_data (dict[str:Any]): An initial namespace to provide when\n executing the students' code. The keys must be strings and\n should be valid Python names. Defaults to None, which will be\n an empty namespace.\n initial_exception (Exception): An initial exception to load into\n the Sandbox. Usually you will let the students' code generate\n its own exceptions, but if you're constructing a sandbox you\n might need to specify one. Defaults to None.\n modules: A dictionary of strings (valid python package names) that\n map to either the value True (if we provide a default\n implementation) or a user-created MockedModule. By default,\n we mock out the following modules:\n * matplotlib\n * pedal\n context (False, None, or list[str]): How to contextualize calls by\n default in this Sandbox. False means no contextualization.\n None (default) means contextualize automatically. If you give\n a list[str], then it assumes you want to contextualize\n automatically but starting off with the given strings.\n initial_raw_output (str): The initial printed output for the\n sandbox. Usually defaults to None to indicate a blank printed\n area.\n instructor_filename (str): The filename to display in tracebacks,\n when executing student code in instructor tests. 
Although you\n can specify something else, defaults to \"instructor_tests.py\".\n \"\"\"\n super().__init__()\n if initial_data is None:\n initial_data = {}\n self.data = initial_data\n\n # Context\n self.call_id = 0\n self.target_contexts = {self.call_id: []}\n self.call_contexts = {self.call_id: []}\n self.input_contexts = {self.call_id: []}\n self.context = context\n self.keep_context = False\n # Update outputs\n self.set_output(initial_raw_output)\n # filename\n self.instructor_filename = instructor_filename\n # Temporary data\n self._temporaries = set()\n self._backups = {}\n # Exception\n self.exception = initial_exception\n self.exception_position = None\n self.exception_formatted = None\n self.report_exceptions_mode = False\n self.raise_exceptions_mode = False\n # Input\n self.set_input(None)\n self._input_tracker = self._track_inputs()\n # Modules\n if modules is None:\n modules = {'matplotlib': True,\n 'pedal': mocked.MockPedal()\n }\n self.mocked_modules = {}\n self.modules = {}\n self.add_mocks(modules)\n self.mocked_functions = {\n 'compile': mocked._disabled_compile,\n 'eval': mocked._disabled_eval,\n 'exec': mocked._disabled_exec,\n 'globals': mocked._disabled_globals,\n 'open': mocked._restricted_open,\n '__import__': mocked._restricted_import,\n }\n if allowed_functions is not None:\n for function_name in allowed_functions:\n if function_name in self.mocked_functions:\n del self.mocked_functions[function_name]\n # Patching\n self._current_patches = []\n # Settings\n self.full_traceback = full_traceback\n self.MAXIMUM_VALUE_LENGTH = 120\n # Tracer Styles\n self.tracer_style = tracer_style\n # Proxying results\n self.result_proxy = result_proxy\n # report\n if report is None:\n report = MAIN_REPORT\n self.report = report\n # Threading\n self.threaded = threaded\n self.allowed_time = 3\n\n def _set_tracer_style(self, tracer_style):\n self._tracer_style = tracer_style.lower()\n self.trace = self.TRACER_STYLES[tracer_style.lower()]()\n\n def _get_tracer_style(self):\n return self._tracer_style\n\n tracer_style = property(_get_tracer_style, _set_tracer_style)\n\n def add_mocks(self, modules):\n \"\"\"\n :param modules: Keyword listing of modules and their contents\n (MockedModules) or True (if its one that we have a\n default implementation for).\n :type modules: dict\n \"\"\"\n for module_name, module_data in modules.items():\n self._add_mock(module_name, module_data)\n\n def _add_mock(self, module_name, module_data):\n # MatPlotLib's PyPlot\n if module_name == 'matplotlib':\n matplotlib, modules = mocked.create_module('matplotlib.pyplot')\n self.mocked_modules.update(modules)\n if module_data is True:\n mock_plt = mocked.MockPlt()\n mock_plt._add_to_module(matplotlib.pyplot)\n self.modules['matplotlib.pyplot'] = mock_plt\n else:\n module_data._add_to_module(matplotlib.pyplot)\n else:\n root, modules = mocked.create_module(module_name)\n self.mocked_modules.update(modules)\n self.modules[module_name] = module_data\n module_data._add_to_module(root)\n\n def set_output(self, raw_output):\n \"\"\"\n Change the current printed output for the sandbox to the given value.\n If None is given, then clears all the given output (empty list for\n `output` and empty string for `raw_output`).\n\n Args:\n raw_output (str): The new raw_output for the sandbox. 
To compute\n the `output` attribute, the system splits and rstrips at\n newlines.\n \"\"\"\n if raw_output is None:\n self.raw_output = \"\"\n self.output = []\n self.output_contexts = {self.call_id: list(self.output)}\n else:\n self.raw_output = raw_output\n lines = raw_output.rstrip().split(\"\\n\")\n self.output = [line.rstrip() for line in lines]\n self.output_contexts[self.call_id] = list(self.output)\n\n def append_output(self, raw_output):\n \"\"\"\n Adds the string of `raw_output` to the current `raw_output` attribute.\n The added string will be split on newlines and rstripped to append\n to the `output` attribute.\n\n Args:\n raw_output (str): The new raw_output for the sandbox. To compute\n the `output` attribute, the system splits and rstrips at\n newlines.\n \"\"\"\n self.raw_output += raw_output\n lines = raw_output.rstrip().split(\"\\n\")\n lines = [line.rstrip() for line in lines]\n if self.raw_output:\n self.output.extend(lines)\n self.output_contexts[self.call_id].extend(lines)\n\n def set_input(self, inputs, clear=True):\n \"\"\"\n Queues the given value as the next arguments to the `input` function.\n \"\"\"\n if inputs is None:\n self.inputs = []\n if clear:\n self.inputs.clear()\n if isinstance(inputs, str):\n self.inputs.append(inputs)\n elif isinstance(inputs, (list, tuple)):\n self.inputs.extend(inputs)\n elif inputs is not None:\n # TODO: intelligently handle custom generator\n self.inputs = inputs\n\n def _track_inputs(self):\n \"\"\"\n Wraps an input function with a tracker.\n \"\"\"\n\n def _input_tracker(prompt, *args, **kwargs):\n print(prompt)\n if self.inputs:\n value_entered = self.inputs.pop(0)\n else:\n # TODO: Make this smarter, more elegant in choosing IF we should repeat 0\n value_entered = '0'\n self.input_contexts[self.call_id].append(value_entered)\n return value_entered\n\n return _input_tracker\n\n def _purge_temporaries(self):\n \"\"\"\n Delete any variables in the namespace that have been made as\n temporaries. This happens automatically after you execute code.\n \"\"\"\n for key in self._temporaries:\n if key in self._backups:\n self.data[key] = self.backups[key]\n else:\n del self.data[key]\n self._temporaries = set()\n\n def _is_long_value(self, value):\n return len(repr(value)) > 25\n\n def _make_temporary(self, category, name, value, context):\n \"\"\"\n Create a temporary variable in the namespace for the given\n category/name. This is used to load arguments into the namespace to\n be used in function calls. Temporaries are only created if the value's\n repr length is too long, as defined by _is_long_value.\n\n Args:\n category (str): A categorical division for the temporary variable\n that can help keep the namespace distinctive - there are a\n few different kinds of categories (e.g., for regular positional\n args, star args, kwargs).\n name (str): A distinctive ID for this variable. 
The final variable\n name will be \"_temporary__\".\n value: The value for this argument.\n Returns:\n str: The new name for the temporary variable.\n \"\"\"\n if isinstance(value, SandboxVariable):\n return value.name\n if not self._is_long_value(value):\n return repr(value)\n key = '_temporary_{}_{}'.format(category, name)\n if key in self.data:\n self._backups[key] = self.data[key]\n self._temporaries.add(key)\n self.data[key] = value\n if context is None:\n self.call_contexts[self.call_id].append(\"{} = {}\".format(key, value))\n return key\n\n def run_file(self, filename, as_filename=None, modules=None, inputs=None,\n threaded=None, context=None, report_exceptions=None,\n raise_exceptions=None):\n \"\"\"\n Load the given filename and execute it within the current namespace.\n \n Args:\n context (False, None, or list[str]): The context to give any\n exceptions. If None, then the recorded context will be used. If\n a string, tracebacks will be shown with the given context. If\n False, no context will be given.\n \"\"\"\n if as_filename is None:\n as_filename = filename\n with open(filename, 'r') as code_file:\n code = code_file.read() + '\\n'\n self.run(code, as_filename, modules, inputs, threaded,\n context, report_exceptions, raise_exceptions)\n\n def list(self, *args):\n pass\n\n def call(self, function, *args, **kwargs):\n \"\"\"\n Args:\n function (str): The name of the function to call that was defined\n by the user.\n as_filename (str): The filename to use when calling this function.\n Defaults to the instructor filename, since you are calling\n code on the student's behalf.\n target (str): The new variable in the namespace to assign to. By\n default this will be \"_\". If you use None, then no variable\n will be assigned to. Note that this could overwrite a variable\n in the user namespace.\n TODO: Add a feature to prevent user namespace overwriting.\n input (list of str): The strings to send in to calls to input.\n You can also pass in a generator to construct strings\n dynamically.\n threaded (bool): Whether or not the function execution should be\n executed in a separate thread. Defaults to True. This prevents\n timeouts from occuring in the students' code (a TimeOutError\n will be thrown after 3 seconds).\n context (False, None, or list[str]): The context to give any\n exceptions. If None, then the recorded context will be used. If\n a string, tracebacks will be shown with the given context. If\n False, no context will be given.\n keep_context (bool): Whether or not to stay in the current context,\n or to start a new one. Defaults to False.\n Returns:\n If the call was successful, returns the result of executing the\n code. 
Otherwise, it will return an Exception relevant to the\n failure (might be a SandboxException, might be a user-space\n exception).\n \"\"\"\n # Confirm that the function_name exists\n if function not in self.functions:\n if function not in self.data:\n self.exception = SandboxHasNoVariable(\n \"The function {function} does not exist.\".format(function=function)\n )\n else:\n self.exception = SandboxHasNoFunction(\n \"The variable {function} is not a function.\".format(function=function)\n )\n return self.exception\n # Parse kwargs for any special arguments.\n as_filename = kwargs.pop('as_filename', self.instructor_filename)\n target = kwargs.pop('target', '_')\n modules = kwargs.pop('modules', {})\n inputs = kwargs.pop('inputs', None)\n threaded = kwargs.pop('threaded', self.threaded)\n context = kwargs.pop('context', self.context)\n keep_context = kwargs.pop('keep_context', self.keep_context)\n report_exceptions = kwargs.pop('report_exceptions', self.report_exceptions_mode)\n raise_exceptions = kwargs.pop('raise_exceptions', self.raise_exceptions_mode)\n # Create the actual arguments and call\n if not keep_context or not self.call_id:\n self.call_id += 1\n self.output_contexts[self.call_id] = []\n self.call_contexts[self.call_id] = []\n self.input_contexts[self.call_id] = []\n # Always update the target context to be most recent\n self.target_contexts[self.call_id] = target\n actual, student = self._construct_call(function, args, kwargs, target,\n context)\n if context is None:\n context = student\n # if context is None:\n # self.call_contexts[self.call_id].append(student_call)\n # if context is not False:\n # self.call_contexts[self.call_id] = context\n self.run(actual, as_filename=as_filename, modules=modules,\n inputs=inputs, threaded=threaded,\n context=context, keep_context=keep_context,\n report_exceptions=report_exceptions,\n raise_exceptions=raise_exceptions)\n self._purge_temporaries()\n if self.exception is None:\n self._ = self.data[target]\n if self.result_proxy is not None:\n self._ = self.result_proxy(self._, call_id=self.call_id,\n sandbox=self)\n return self._\n else:\n # TODO: Might need to wrap this in case the student was supposed\n # to return an exception - weird circumstance though\n return self.exception\n\n def make_safe_variable(self, name):\n \"\"\"\n Tries to construct a safe variable name in the current namespace, based\n off the given one. 
This is accomplished by appending a \"_\" and a number\n of increasing value until no comparable name exists in the namespace.\n This is particularly useful when you want to create a variable name to\n assign to, but you are concerned that the user might have a variable\n with that name already, which their code relies on.\n \n Args:\n name (str): A desired target name.\n Returns:\n str: A safe target name, based off the given one.\n \"\"\"\n current_addition = \"\"\n attempt_index = 2\n while name + current_addition in self.data:\n current_addition = \"_{}\".format(attempt_index)\n attempt_index += 1\n return name + current_addition\n\n def _construct_call(self, function, args, kwargs, target, context):\n str_args = [self._make_temporary('arg', index, value, context)\n for index, value in enumerate(args)]\n str_kwargs = [\"{}={}\".format(key,\n self._make_temporary('kwarg', key, value, context))\n for key, value in kwargs.items()]\n arguments = \", \".join(str_args + str_kwargs)\n call = \"{}({})\".format(function, arguments)\n if target is None:\n actual = call\n else:\n actual = \"{} = {}\".format(target, call)\n student_call = call if target is \"_\" else actual\n return actual, student_call\n\n def _start_patches(self, *patches):\n self._current_patches.append(patches)\n for patch in patches:\n patch.start()\n\n def _stop_patches(self):\n patches = self._current_patches.pop()\n for patch in patches:\n patch.stop()\n\n def _capture_exception(self, exception, exc_info, report_exceptions,\n raise_exceptions, context, keep_context,\n as_filename=\"\", code=\"\"):\n self.exception = exception\n if context is not False:\n if context is None or keep_context:\n contexts = self.call_contexts[self.call_id]\n if context is not None:\n contexts.append(context)\n context = '\\n'.join(contexts)#[1:])\n if context.strip():\n context = self.CONTEXT_MESSAGE.format(context=context)\n inputs = self.input_contexts[self.call_id]\n if inputs is not None and inputs:\n inputs = \"\\n\".join(inputs)\n context += \"\\n\"+self.INPUT_CONTEXT_MESSAGE.format(inputs=inputs)\n else:\n context = self.FILE_CONTEXT_MESSAGE.format(filename=self.report['source']['filename'])\n self.exception = _add_context_to_error(self.exception, context)\n line_offset = self.report['source'].get('line_offset', 0)\n student_filename = self.report['source'].get('filename', as_filename)\n if 'lines' in self.report['source']:\n lines = self.report['source']['lines']\n else:\n lines = code.split(\"\\n\")\n traceback = SandboxTraceback(self.exception, exc_info,\n self.full_traceback,\n self.instructor_filename,\n line_offset, student_filename,\n lines)\n self.exception_position = {'line': traceback.line_number}\n self.exception_formatted = traceback.format_exception()\n self.exception_name = str(self.exception.__class__)[8:-2]\n # Do we add the exception to the report?\n if report_exceptions is False:\n return True\n if report_exceptions is None and not self.report_exceptions_mode:\n return True\n self.report.attach(self.exception_name,\n group=self.report.group,\n category='Runtime', tool='Sandbox',\n mistake={'message': self.exception_formatted,\n 'error': self.exception})\n if raise_exceptions is True:\n raise SandboxStudentCodeException(self.exception)\n return False\n\n def run(self, code, as_filename=None, modules=None, inputs=None,\n threaded=None, report_exceptions=True, raise_exceptions=False,\n context=False, keep_context=False):\n \"\"\"\n Execute the given string of code in this sandbox.\n \n Args:\n code (str): The string of 
code to be executed.\n as_filename (str): The filename to use when executing the code -\n this is cosmetic, technically speaking, it has no relation\n to anything on disk. It will be present in tracebacks.\n Defaults to Source's filename.\n modules (dict[str:Module]): Modules to mock.\n inputs (list[str]): The inputs to give from STDIN, as a list of\n strings. You can also give a function that emulates the\n input function; e.g., consuming a prompt (str) and producing\n strings. This could be used to make a more interactive input\n system.\n context (str): The context to give any exceptions.\n If None, then the recorded context will be used. If a string,\n tracebacks will be shown with the given context. If False,\n no context will be given (the default).\n threaded (bool): whether or not to run this code in a separate\n thread. Defaults to :attribute:`Sandbox.threaded`.\n report_exceptions (bool): Whether or not to capture exceptions.\n \"\"\"\n # Handle any threading if necessary\n if threaded is None:\n threaded = self.threaded\n if threaded:\n try:\n return timeout(self.allowed_time, self.run, code, as_filename,\n modules, inputs, False,\n report_exceptions, raise_exceptions,\n context, keep_context)\n except TimeoutError as timeout_exception:\n self._capture_exception(timeout_exception, sys.exc_info(),\n report_exceptions, raise_exceptions,\n context, keep_context, as_filename,\n code)\n return self\n \n if as_filename is None:\n as_filename = os.path.basename(self.report['source']['filename'])\n # todo: earlier version of inputs being made?\n if inputs is not None:\n self.set_input(inputs)\n # Override builtins and mock stuff out\n mocked_functions = self.mocked_functions.copy()\n mocked_functions['input'] = self._input_tracker\n mocked_functions['raw_input'] = self._input_tracker\n mocked_functions['sys'] = sys\n mocked_functions['os'] = os\n mocked._override_builtins(self.data, mocked_functions)\n\n self.exception = None\n self.exception_position = None\n self.exception_formatted = None\n\n # Patch in dangerous built-ins\n x = sys.stdout\n capture_stdout = io.StringIO()\n self._start_patches(\n patch.dict('sys.modules', self.mocked_modules),\n patch('sys.stdout', capture_stdout),\n patch('time.sleep', return_value=None),\n )\n # TODO: Hack, add more flexibile way to specify unusable modules\n for module in list(sys.modules.keys()):\n if module.startswith('pedal.'):\n del sys.modules[module]\n try:\n compiled_code = compile(code, as_filename, 'exec')\n with self.trace._as_filename(as_filename, code):\n exec(compiled_code, self.data)\n except Exception as user_exception:\n self._stop_patches()\n info = sys.exc_info()\n self._capture_exception(user_exception, info,\n report_exceptions, raise_exceptions,\n context, keep_context, as_filename,\n code)\n else:\n self._stop_patches()\n finally:\n self.append_output(capture_stdout.getvalue())\n if context is None:\n self.call_contexts[self.call_id].append(code)\n elif isinstance(context, str):\n self.call_contexts[self.call_id].append(context)\n elif context is not False:\n self.call_contexts[self.call_id] = context\n return self\n\n\ndef run(initial_data=None, initial_raw_output=None, initial_exception=None,\n allowed_functions=None,\n modules=None, inputs=None, report_exceptions=True, raise_exceptions=False,\n context=None,\n full_traceback=False, tracer_style='none', threaded=False,\n result_proxy=SandboxResult,\n instructor_filename=\"instructor_tests.py\",\n code=None, as_filename=None, report=None):\n if report is None:\n report = 
MAIN_REPORT\n if 'run' not in report['sandbox']:\n report['sandbox']['settings'] = [\n initial_data, initial_raw_output, initial_exception, modules,\n full_traceback, tracer_style, threaded, report, context,\n result_proxy, instructor_filename, allowed_functions\n ]\n report['sandbox']['run'] = Sandbox(*report['sandbox']['settings'])\n\n sandbox = report['sandbox']['run']\n if code is None:\n code = report['source']['code']\n sandbox.run(code, as_filename, modules, inputs, threaded,\n report_exceptions, raise_exceptions, context=context, keep_context=False)\n return sandbox\n\n\ndef reset(report=None):\n if report is None:\n report = MAIN_REPORT\n if 'settings' in report['sandbox']:\n report['sandbox']['run'] = Sandbox(*report['sandbox']['settings'])\n else:\n run(report=report)\n","src/lib/pedal/sandbox/timeout.py":"\"\"\"\nA module that exposes a useful method (`timeout`) that can execute a\nfunction asynchronously and terminiate if it exceeds a given `duration`.\n\"\"\"\n\nimport sys\nimport time\n\ntry:\n import threading\nexcept BaseException:\n threading = None\ntry:\n import ctypes\nexcept BaseException:\n ctypes = None\n\n\nclass InterruptableThread(threading.Thread):\n '''\n A thread that can be interrupted.\n '''\n\n def __init__(self, func, args, kwargs):\n threading.Thread.__init__(self)\n self.func, self.args, self.kwargs = func, args, kwargs\n self.daemon = True\n self.result = None\n self.exc_info = (None, None, None)\n\n def run(self):\n '''\n Begin thread execution, calling the `func` that was originally\n passed in.\n '''\n try:\n self.result = self.func(*self.args, **self.kwargs)\n except Exception:\n self.exc_info = sys.exc_info()\n\n @staticmethod\n def _async_raise(thread_id, exception):\n '''\n Static method to raise an error asychronously using the ctypes module.\n '''\n # Cache the function for convenience\n RaiseAsyncException = ctypes.pythonapi.PyThreadState_SetAsyncExc\n\n states_modified = RaiseAsyncException(ctypes.c_long(thread_id),\n ctypes.py_object(exception))\n if states_modified == 0:\n raise ValueError(\"nonexistent thread id\")\n elif states_modified > 1:\n RaiseAsyncException(thread_id, 0)\n raise SystemError(\"PyThreadState_SetAsyncExc failed\")\n\n def raise_exception(self, exception):\n '''\n Trigger a thread ending exception!\n '''\n assert self.is_alive(), \"thread must be started\"\n for thread_id, thread in threading._active.items():\n if thread is self:\n InterruptableThread._async_raise(thread_id, exception)\n return\n\n def terminate(self):\n self.exc_info = sys.exc_info()\n self.raise_exception(SystemExit)\n\n\ndef timeout(duration, func, *args, **kwargs):\n \"\"\"\n Executes a function and kills it (throwing an exception) if it runs for\n longer than the specified duration, in seconds.\n \"\"\"\n\n # If libraries are not available, then we execute normally\n if None in (threading, ctypes):\n return func(*args, **kwargs)\n\n target_thread = InterruptableThread(func, args, kwargs)\n target_thread.start()\n target_thread.join(duration)\n\n if target_thread.is_alive():\n target_thread.terminate()\n timeout_exception = TimeoutError('Your code took too long to run '\n '(it was given {} seconds); '\n 'maybe you have an infinite loop?'.format(duration))\n raise timeout_exception\n else:\n if target_thread.exc_info[0] is not None:\n ei = target_thread.exc_info\n # Python 2 had the three-argument raise statement; thanks to PEP\n # 3109 for showing how to convert that to valid Python 3 statements.\n e = ei[0](ei[1])\n e.__traceback__ = ei[2]\n 
e.exc_info = target_thread.exc_info\n raise e\n\n\n# =========================================================================\n\n\nclass _TimeoutData:\n \"\"\"\n Port of Craig Estep's AdaptiveTimeout JUnit rule from the VTCS student\n library.\n \"\"\"\n\n # -------------------------------------------------------------\n def __init__(self, ceiling):\n self.ceiling = ceiling # sec\n self.maximum = ceiling * 2 # sec\n self.minimum = 0.25 # sec\n self.threshold = 0.6\n self.rampup = 1.4\n self.rampdown = 0.5\n self.start = self.end = 0\n self.non_terminating_methods = 0\n\n # -------------------------------------------------------------\n\n def before_test(self):\n \"\"\"\n Call this before a test case runs in order to reset the timer.\n \"\"\"\n self.start = time.time()\n\n # -------------------------------------------------------------\n\n def after_test(self):\n \"\"\"\n Call this after a test case runs. This will examine how long it took\n the test to execute, and if it required an amount of time greater than\n the current ceiling, it will adaptively adjust the allowed time for\n the next test.\n \"\"\"\n self.end = time.time()\n diff = self.end - self.start\n\n if diff > self.ceiling:\n self.non_terminating_methods += 1\n\n if self.non_terminating_methods >= 2:\n if self.ceiling * self.rampdown < self.minimum:\n self.ceiling = self.minimum\n else:\n self.ceiling = (self.ceiling * self.rampdown)\n elif diff > self.ceiling * self.threshold:\n if self.ceiling * self.rampup > self.maximum:\n self.ceiling = self.maximum\n else:\n self.ceiling = (self.ceiling * self.rampup)\n","src/lib/pedal/sandbox/tracer.py":"import sys\nimport os\n\ntry:\n import coverage\nexcept ImportError:\n coverage = None\n\ntry:\n from bdb import Bdb, BdbQuit\nexcept Exception:\n class Bdb:\n pass\n\n\n class BdbQuit:\n pass\n\n\nclass SandboxBasicTracer:\n def __init__(self):\n super().__init__()\n self.filename = \"student.py\"\n\n def _as_filename(self, filename, code):\n if os.path.isabs(filename):\n self.filename = filename\n else:\n self.filename = os.path.abspath(filename)\n self.code = code\n return self\n\n def __enter__(self):\n pass\n\n def __exit__(self, exc_type, exc_val, traceback):\n pass\n\n\nclass SandboxCoverageTracer(SandboxBasicTracer):\n def __init__(self):\n super().__init__()\n if coverage is None:\n raise ImportError(\"The coverage package is not available.\")\n self.n_missing = None\n self.n_statements = None\n self.pc_covered = None\n self.missing = set()\n self.lines = set()\n # self.s = sys.stdout\n\n def __enter__(self):\n # Force coverage to accept the code\n self.original = coverage.python.get_python_source\n\n def _get_source_correctly(reading_filename):\n print(reading_filename, file=self.s)\n if reading_filename == self.filename:\n return self.code\n else:\n return self.original(reading_filename)\n\n coverage.python.get_python_source = _get_source_correctly\n self.coverage = coverage.Coverage()\n self.coverage.start()\n\n def __exit__(self, exc_type, exc_val, traceback):\n self.coverage.stop()\n self.coverage.save()\n # Restore the get_python_source reader\n coverage.python.get_python_source = self.original\n self.original = None\n # Actually analyze the data, attach some data\n analysis = self.coverage._analyze(self.filename)\n # print(vars(self.coverage._analyze(self.filename)), file=self.s)\n self.n_missing = analysis.numbers.n_missing\n self.n_statements = analysis.numbers.n_statements\n self.pc_covered = analysis.numbers.pc_covered\n self.missing = analysis.missing\n self.lines 
= analysis.statements - analysis.missing\n \n @property\n def percent_covered(self):\n return self.pc_covered\n\n\nclass SandboxCallTracer(SandboxBasicTracer, Bdb):\n def __init__(self):\n super().__init__()\n self.calls = {}\n\n def user_call(self, frame, argument_list):\n code = frame.f_code\n name = code.co_name\n if name not in self.calls:\n self.calls[name] = []\n self.calls[name].append(code)\n\n def __enter__(self):\n self.reset()\n self._old_trace = sys.gettrace()\n sys.settrace(self.trace_dispatch)\n\n def __exit__(self, exc_type, exc_val, traceback):\n sys.settrace(self._old_trace)\n self.quitting = True\n # Return true to suppress exception (if it is a BdbQuit)\n return isinstance(exc_type, BdbQuit)\n","src/lib/pedal/sandbox/__init__.py":"from pedal.report import MAIN_REPORT\nfrom pedal.sandbox.sandbox import Sandbox, DataSandbox\n\n# Compatibility API\n'''\nrun_student\nqueue_input\nreset_output\nget_output\n'''\n\n\ndef reset(report=None):\n if report is None:\n report = MAIN_REPORT\n report['sandbox']['run'] = Sandbox(filename=report['source']['filename'])\n\n\ndef run(raise_exceptions=True, report=None, coverage=False, threaded=False, inputs=None):\n if report is None:\n report = MAIN_REPORT\n if 'run' not in report['sandbox']:\n report['sandbox']['run'] = Sandbox(filename=report['source']['filename'], threaded=threaded)\n sandbox = report['sandbox']['run']\n source_code = report['source']['code']\n sandbox.record_coverage = coverage\n sandbox.run(source_code, _as_filename=report['source']['filename'], _inputs=inputs)\n if raise_exceptions and sandbox.exception is not None:\n name = str(sandbox.exception.__class__)[8:-2]\n report.attach(name, category='Runtime', tool='Sandbox',\n section=report['source']['section'],\n mistakes={'message': sandbox.format_exception(),\n 'error': sandbox.exception})\n return sandbox\n","src/lib/pedal/source/sections.py":"from pedal.report import MAIN_REPORT\nimport ast\n\n\n#def move_to_section(section_number, name, report=None):\n# pass\n\ndef _calculate_section_number(section_index):\n return int((section_index+1)/2)\n\ndef next_section(name=\"\", report=None):\n if report is None:\n report = MAIN_REPORT\n report.execute_hooks('source.next_section.before')\n source = report['source']\n #if not report['source']['success']:\n # return False\n source['section'] += 2\n section_index = source['section']\n section_number = _calculate_section_number(section_index)\n sections = source['sections']\n found = len(source['sections'])\n if section_index < found:\n if source['independent']:\n source['code'] = ''.join(sections[section_index])\n old_code = ''.join(sections[:section_index])\n source['line_offset'] = len(old_code.split(\"\\n\"))-1\n else:\n source['code'] = ''.join(sections[:section_index + 1])\n report.group = section_index\n else:\n report.attach('Syntax error', category='Syntax', tool='Source',\n mistake=(\"Tried to advance to next section but the \"\n \"section was not found. Tried to load section \"\n \"{count}, but there were only {found} sections.\"\n ).format(count=section_number, found=found))\n\ndef check_section_exists(section_number, report=None):\n \"\"\"\n Checks that the right number of sections exist. The prologue before the\n first section is 0, while subsequent ones are 1, 2, 3, etc. 
\n So if you have 3 sections in your code plus the prologue,\n you should pass in 3 and not 4 to verify that all of them exist.\n \"\"\"\n if report is None:\n report = MAIN_REPORT\n if not report['source']['success']:\n return False\n found = int((len(report['source']['sections']) - 1) / 2)\n if section_number > found:\n report.attach('Syntax error', category='Syntax', tool='Source',\n group=report['source']['section'],\n mistake=(\"Incorrect number of sections in your file. \"\n \"Expected {count}, but only found {found}\"\n ).format(count=section_number, found=found))\n\n\ndef verify_section(report=None):\n if report is None:\n report = MAIN_REPORT\n source = report['source']\n #if not source['success']:\n # return False\n code = source['code']\n try:\n parsed = ast.parse(code, source['filename'])\n source['ast'] = parsed\n except SyntaxError as e:\n report.attach('Syntax error', category='Syntax', tool='Source',\n group=source['section'],\n mistake={'message': \"Invalid syntax on line \"\n + str(e.lineno+source['line_offset'])+\"\\n\",\n 'error': e,\n 'position': {\"line\": e.lineno}})\n source['success'] = False\n if 'ast' in source:\n del source['ast']\n return source['success']\n\n\nclass _finish_section:\n def __init__(self, number, *functions):\n if isinstance(number, int):\n self.number = number\n else:\n self.number = -1\n functions = [number] + list(functions)\n self.functions = functions\n for function in functions:\n self(function, False)\n\n def __call__(self, f=None, quiet=True):\n if f is not None:\n f()\n if quiet:\n print(\"\\tNEXT SECTION\")\n\n def __enter__(self):\n pass\n\n def __exit__(self, x, y, z):\n print(\"\\tNEXT SECTION\")\n # return wrapped_f\n\n\ndef finish_section(number, *functions, **kwargs):\n if 'next_section' in kwargs:\n next_section = kwargs['next_section']\n else:\n next_section = False\n if len(functions) == 0:\n x = _finish_section(number, *functions)\n x()\n else:\n result = _finish_section(number, *functions)\n if next_section:\n print(\"\\tNEXT SECTION\")\n return result\n\n\ndef section(number):\n \"\"\"\n \"\"\"\n pass\n\n\ndef precondition(function):\n pass\n\n\ndef postcondition(function):\n pass\n","src/lib/pedal/source/__init__.py":"\"\"\"\nA package for verifying source code.\n\"\"\"\n\nfrom pedal.source.sections import *\nfrom pedal.report import MAIN_REPORT\nimport re\nimport ast\n\nNAME = 'Source'\nSHORT_DESCRIPTION = \"Verifies source code and attaches it to the report\"\nDESCRIPTION = '''\n'''\nREQUIRES = []\nOPTIONALS = []\nCATEGORY = 'Syntax'\n\n__all__ = ['NAME', 'DESCRIPTION', 'SHORT_DESCRIPTION', 'REQUIRES', 'OPTIONALS',\n 'set_source', 'check_section_exists', 'next_section', 'verify_section',\n 'set_source_file']\nDEFAULT_PATTERN = r'^(##### Part .+)$'\n\n\ndef set_source(code, filename='__main__.py', sections=False, independent=False,\n report=None):\n \"\"\"\n Sets the contents of the Source to be the given code. Can also be\n optionally given a filename.\n\n Args:\n code (str): The contents of the source file.\n filename (str): The filename of the students' code. Defaults to\n __main__.py.\n sections (str or bool): Whether or not the file should be divided into\n sections. If a str, then it should be a\n Python regular expression for how the sections\n are separated. If False, there will be no\n sections. If True, then the default pattern\n will be used: '^##### Part (\\\\d+)$'\n report (Report): The report object to store data and feedback in. 
If\n left None, defaults to the global MAIN_REPORT.\n \"\"\"\n if report is None:\n report = MAIN_REPORT\n report['source']['code'] = code\n report['source']['full'] = code\n report['source']['lines'] = code.split(\"\\n\")\n report['source']['filename'] = filename\n report['source']['independent'] = independent\n report['source']['success'] = True\n if not sections:\n report['source']['sections'] = None\n report['source']['section'] = None\n _check_issues(code, report)\n else:\n if sections:\n pattern = DEFAULT_PATTERN\n else:\n pattern = sections\n report.group = 0\n report['source']['section_pattern'] = pattern\n report['source']['section'] = 0\n report['source']['line_offset'] = 0\n report['source']['sections'] = re.split(pattern, code,\n flags=re.MULTILINE)\n report['source']['code'] = report['source']['sections'][0]\n\n\ndef _check_issues(code, report):\n if code.strip() == '':\n report.attach('Blank source', category='Syntax', tool=NAME,\n group=report['source']['section'],\n mistake=\"Source code file is blank.\")\n report['source']['success'] = False\n try:\n parsed = ast.parse(code, report['source']['filename'])\n report['source']['ast'] = parsed\n except SyntaxError as e:\n report.attach('Syntax error', category='Syntax', tool='Source',\n group=report['source']['section'],\n mistake={'message': \"Invalid syntax on line \"\n + str(e.lineno)\n + \"\\n\\n\"+str(e),\n 'error': e,\n 'position': {\"line\": e.lineno}})\n report['source']['success'] = False\n report['source']['ast'] = ast.parse(\"\")\n\n\ndef get_program(report=None):\n if report is None:\n report = MAIN_REPORT\n return report['source']['code']\n\ndef set_source_file(filename, sections=False, independent=False, report=None):\n if report is None:\n report = MAIN_REPORT\n try:\n with open(filename, 'r') as student_file:\n set_source(student_file.read(), filename=filename,\n sections=sections, independent=independent,\n report=report)\n except IOError:\n message = (\"The given filename ('{filename}') was either not found\"\n \" or could not be opened. 
Please make sure the file is\"\n \" available.\").format(filename=filename)\n report.attach('Source File Not Found', category='Syntax', tool='Source',\n group=0 if sections else None,\n mistake={'message': message})\n report['source']['success'] = False\n","src/lib/pedal/tifa/builtin_definitions.py":"from pedal.tifa.type_definitions import (UnknownType, FunctionType,\n NumType, NoneType, BoolType,\n TupleType, ListType, StrType,\n FileType, DictType, ModuleType,\n SetType, DayType, TimeType, ClassType,\n LiteralNum)\n\ndef get_builtin_module(name):\n if name == 'matplotlib':\n return ModuleType('matplotlib',\n submodules={\n 'pyplot': ModuleType('pyplot', fields={\n 'plot': FunctionType(name='plot', returns=NoneType()),\n 'hist': FunctionType(name='hist', returns=NoneType()),\n 'scatter': FunctionType(name='scatter', returns=NoneType()),\n 'show': FunctionType(name='show', returns=NoneType()),\n 'xlabel': FunctionType(name='xlabel', returns=NoneType()),\n 'ylabel': FunctionType(name='ylabel', returns=NoneType()),\n 'title': FunctionType(name='title', returns=NoneType()),\n })\n })\n elif name == 'pprint':\n return ModuleType('pprint',\n fields={\n 'pprint': FunctionType(name='pprint', returns=NoneType())\n })\n elif name == 'random':\n return ModuleType('random',\n fields={\n 'randint': FunctionType(name='randint', returns=NumType())\n })\n elif name == 'string':\n return ModuleType('string',\n fields={\n 'letters': StrType(empty=False),\n 'digits': StrType(empty=False),\n 'ascii_letters': StrType(empty=False),\n 'punctuation': StrType(empty=False),\n 'printable': StrType(empty=False),\n 'whitespace': StrType(empty=False),\n 'ascii_uppercase': StrType(empty=False),\n 'ascii_lowercase': StrType(empty=False),\n 'hexdigits': StrType(empty=False),\n 'octdigits': StrType(empty=False),\n })\n elif name == 'turtle':\n return ModuleType('turtle',\n fields={\n 'forward': FunctionType(name='forward', returns=NoneType()),\n 'backward': FunctionType(name='backward', returns=NoneType()),\n 'color': FunctionType(name='color', returns=NoneType()),\n 'right': FunctionType(name='right', returns=NoneType()),\n 'left': FunctionType(name='left', returns=NoneType()),\n })\n elif name == 'parking':\n return ModuleType('parking',\n fields={\n 'Time': FunctionType(name='Time', returns=TimeType()),\n 'now': FunctionType(name='now', returns=TimeType()),\n 'Day': FunctionType(name='Day', returns=DayType()),\n 'today': FunctionType(name='today', returns=DayType()),\n }),\n elif name == 'math':\n return ModuleType('math',\n fields={\n 'ceil': FunctionType(name='ceil', returns=NumType()),\n 'copysign': FunctionType(name='copysign', returns=NumType()),\n 'fabs': FunctionType(name='fabs', returns=NumType()),\n 'factorial': FunctionType(name='factorial', returns=NumType()),\n 'floor': FunctionType(name='floor', returns=NumType()),\n 'fmod': FunctionType(name='fmod', returns=NumType()),\n 'frexp': FunctionType(name='frexp', returns=NumType()),\n 'fsum': FunctionType(name='fsum', returns=NumType()),\n 'gcd': FunctionType(name='gcd', returns=NumType()),\n 'isclose': FunctionType(name='isclose', returns=BoolType()),\n 'isfinite': FunctionType(name='isfinite', returns=BoolType()),\n 'isinf': FunctionType(name='isinf', returns=BoolType()),\n 'isnan': FunctionType(name='isnan', returns=BoolType()),\n 'ldexp': FunctionType(name='ldexp', returns=NumType()),\n 'modf': FunctionType(name='modf', returns=NumType()),\n 'trunc': FunctionType(name='trunc', returns=NumType()),\n 'log': FunctionType(name='log', returns=NumType()),\n 
'log1p': FunctionType(name='log1p', returns=NumType()),\n 'log2': FunctionType(name='log2', returns=NumType()),\n 'log10': FunctionType(name='log10', returns=NumType()),\n 'pow': FunctionType(name='pow', returns=NumType()),\n 'sqrt': FunctionType(name='sqrt', returns=NumType()),\n 'sin': FunctionType(name='sin', returns=NumType()),\n 'cos': FunctionType(name='cos', returns=NumType()),\n 'tan': FunctionType(name='tan', returns=NumType()),\n 'asin': FunctionType(name='asin', returns=NumType()),\n 'acos': FunctionType(name='acos', returns=NumType()),\n 'atan': FunctionType(name='atan', returns=NumType()),\n 'atan2': FunctionType(name='atan2', returns=NumType()),\n 'hypot': FunctionType(name='hypot', returns=NumType()),\n 'degrees': FunctionType(name='degrees', returns=NumType()),\n 'radians': FunctionType(name='radians', returns=NumType()),\n 'sinh': FunctionType(name='sinh', returns=NumType()),\n 'cosh': FunctionType(name='cosh', returns=NumType()),\n 'tanh': FunctionType(name='tanh', returns=NumType()),\n 'asinh': FunctionType(name='asinh', returns=NumType()),\n 'acosh': FunctionType(name='acosh', returns=NumType()),\n 'atanh': FunctionType(name='atanh', returns=NumType()),\n 'erf': FunctionType(name='erf', returns=NumType()),\n 'erfc': FunctionType(name='erfc', returns=NumType()),\n 'gamma': FunctionType(name='gamma', returns=NumType()),\n 'lgamma': FunctionType(name='lgamma', returns=NumType()),\n 'pi': NumType(),\n 'e': NumType(),\n 'tau': NumType(),\n 'inf': NumType(),\n 'nan': NumType(),\n })\n\n\ndef _builtin_sequence_constructor(sequence_type):\n \"\"\"\n Helper function for creating constructors for the Set and List types.\n These constructors use the subtype of the arguments.\n\n Args:\n sequence_type (Type): A function for creating new sequence types.\n \"\"\"\n\n def sequence_call(tifa, function_type, callee, args, position):\n # TODO: Should inherit the emptiness too\n return_type = sequence_type(empty=True)\n if args:\n return_type.subtype = args[0].index(LiteralNum(0))\n return_type.empty = False\n return return_type\n\n return sequence_call\n\n\ndef _builtin_zip(tifa, function_type, callee, args, position):\n \"\"\"\n Definition of the built-in zip function, which consumes a series of\n sequences and returns a list of tuples, with each tuple composed of the\n elements of the sequence paired (or rather, tupled) together.\n \"\"\"\n if args:\n tupled_types = TupleType(subtypes=[])\n for arg in args:\n tupled_types.append(arg.index(0))\n return ListType(tupled_types, empty=False)\n return ListType(empty=True)\n\n\n# TODO: Exceptions\n\ndef get_builtin_function(name):\n # Void Functions\n if name == \"print\":\n return FunctionType(name=\"print\", returns=NoneType())\n # Math Functions\n elif name in (\"int\", \"abs\", \"float\", \"len\", \"ord\", \"pow\", \"round\", \"sum\"):\n return FunctionType(name=name, returns=NumType())\n # Boolean Functions\n elif name in (\"bool\", \"all\", \"any\", \"isinstance\"):\n return FunctionType(name=name, returns=BoolType())\n # String Functions\n elif name in (\"str\", 'chr', 'bin', 'repr', 'input'):\n return FunctionType(name=name, returns=StrType())\n # File Functions\n elif name == \"open\":\n return FunctionType(name=\"open\", returns=FileType())\n # List Functions\n elif name == \"map\":\n return FunctionType(name=\"map\", returns=ListType(empty=False))\n elif name == \"list\":\n return FunctionType(name=\"list\",\n definition=_builtin_sequence_constructor(ListType))\n # Set Functions\n elif name == \"set\":\n return 
FunctionType(name=\"set\",\n definition=_builtin_sequence_constructor(SetType))\n # Dict Functions\n elif name == \"dict\":\n return FunctionType(name=\"dict\", returns=DictType())\n # Pass through\n elif name == \"sorted\":\n return FunctionType(name=\"sorted\", returns='identity')\n elif name == \"reversed\":\n return FunctionType(name=\"reversed\", returns='identity')\n elif name == \"filter\":\n return FunctionType(name=\"filter\", returns='identity')\n # Special Functions\n elif name == \"type\":\n return FunctionType(name=\"type\", returns=UnknownType())\n elif name == \"range\":\n return FunctionType(name=\"range\", returns=ListType(NumType(), empty=False))\n elif name == \"dir\":\n return FunctionType(name=\"dir\", returns=ListType(StrType(), empty=False))\n elif name == \"max\":\n return FunctionType(name=\"max\", returns='element')\n elif name == \"min\":\n return FunctionType(name=\"min\", returns='element')\n elif name == \"zip\":\n return FunctionType(name=\"zip\", returns=_builtin_zip)\n elif name == \"__import__\":\n return FunctionType(name=\"__import__\", returns=ModuleType())\n elif name == \"globals\":\n return FunctionType(name=\"globals\",\n returns=DictType(keys=StrType(),\n values=UnknownType(),\n empty=False))\n elif name in (\"classmethod\", \"staticmethod\"):\n return FunctionType(name=name, returns='identity')","src/lib/pedal/tifa/identifier.py":"class Identifier:\n \"\"\"\n A representation of an Identifier, encapsulating its current level of\n existence, scope and State.\n\n Attributes:\n exists (bool): Whether or not the variable actually is defined anywhere.\n It is possible that a variable was retrieved that does\n not actually exist yet, which indicates it might need to\n be created.\n in_scope (bool): Whether or not the variable exists in the current\n scope. Used to detect the presence of certain kinds\n of errors where the user is using a variable from\n a different scope.\n scoped_name (str): The fully qualified name of the variable, including\n its scope chain.\n state (State): The current state of the variable.\n \"\"\"\n\n def __init__(self, exists, in_scope=False, scoped_name=\"UNKNOWN\", state=\"\"):\n self.exists = exists\n self.in_scope = in_scope\n self.scoped_name = scoped_name\n self.state = state\n","src/lib/pedal/tifa/messages.py":"import ast\n\nOPERATION_DESCRIPTION = {\n ast.Pow: \"an exponent\",\n ast.Add: \"an addition\",\n ast.Mult: \"a multiplication\",\n ast.Sub: \"a subtraction\",\n ast.Div: \"a division\",\n ast.FloorDiv: \"a division\",\n ast.Mod: \"a modulo\",\n ast.LShift: \"a left shift\",\n ast.RShift: \"a right shift\",\n ast.BitOr: \"a bit or\",\n ast.BitAnd: \"a bit and\",\n ast.BitXor: \"a bit xor\",\n ast.And: \"an and\",\n ast.Or: \"an or\",\n ast.Eq: \"an ==\",\n ast.NotEq: \"a !=\",\n ast.Lt: \"a <\",\n ast.LtE: \"a <=\",\n ast.Gt: \"a >\",\n ast.GtE: \"a >=\",\n ast.Is: \"an is\",\n ast.IsNot: \"an is not\",\n ast.In: \"an in\",\n ast.NotIn: \"an not in\",\n}\n\n\ndef _format_message(issue, data):\n if issue == 'Action after return':\n # A path had a statement after a return.\n return (\"You performed an action after already returning from a \"\n \"function, on line {line}. 
You can only return on a path \"\n \"once.\").format(line=data['position']['line'])\n elif issue == 'Return outside function':\n # Attempted to return outside of a function\n return (\"You attempted to return outside of a function on line {line}.\"\n \" But you can only return from within a function.\"\n ).format(line=data['position']['line'])\n elif issue == \"Multiple Return Types\":\n return (\"Your function returned {actual} on line {line}, even though you defined it to\"\n \" return {expected}. Your function should return values consistently.\"\n ).format(expected=data['expected'], actual=data['actual'], line=data['position']['line'])\n elif issue == 'Write out of scope':\n # DEPRECATED\n # Attempted to modify a variable in a higher scope\n return False\n return (\"You attempted to write a variable from a higher scope \"\n \"(outside the function) on line {line}. You should only \"\n \"use variables inside the function they were declared in.\"\n ).format(line=data['position']['line'])\n elif issue == 'Unconnected blocks':\n # Any names with ____\n return (\"It looks like you have unconnected blocks on line {line}. \"\n \"Before you run your program, you must make sure that all \"\n \"of your blocks are connected that there are no unfilled \"\n \"holes.\").format(line=data['position']['line'])\n elif issue == 'Iteration Problem':\n # Iteration list is the iteration variable\n return (\"The variable {name}
was iterated on line \"\n \"{line} but you used the same variable as the iteration \"\n \"variable. You should choose a different variable name \"\n \"for the iteration variable. Usually, the iteration variable \"\n \"is the singular form of the iteration list (e.g., \"\n \"for a_dog in dogs:
).\").format(\n line=data['position']['line'],\n name=data['name'])\n elif issue == 'Initialization Problem':\n # A variable was read before it was defined\n return (\"The variable {name}
was used on line {line}, \"\n \"but it was not given a value on a previous line. \"\n \"You cannot use a variable until it has been given a value.\"\n ).format(line=data['position']['line'], name=data['name'])\n elif issue == 'Possible Initialization Problem':\n # A variable was read but was not defined in every branch\n if data['name'] == '*return':\n return False\n return (\"The variable {name}
was used on line {line}, \"\n \"but it was possibly not given a value on a previous \"\n \"line. You cannot use a variable until it has been given \"\n \"a value. Check to make sure that this variable was \"\n \"declared in all of the branches of your decision.\"\n ).format(line=data['position']['line'], name=data['name'])\n elif issue == 'Unused Variable':\n # A variable was not read after it was defined\n name = data['name']\n if data['type'].is_equal('function'):\n kind = 'function'\n body = 'definition'\n else:\n kind = 'variable'\n body = 'value'\n return (\"The {kind} {name}
was given a {body}, but \"\n \"was never used after that.\"\n ).format(name=name, kind=kind, body=body)\n elif issue == 'Overwritten Variable':\n return (\"The variable {name}
was given a value, but \"\n \"{name}
was changed on line {line} before it \"\n \"was used. One of the times that you gave {name}
\"\n \"a value was incorrect.\"\n ).format(line=data['position']['line'], name=data['name'])\n elif issue == 'Iterating over Non-list':\n if 'name' not in data or data['name'] is None:\n expression = \"expression\"\n else:\n expression = \"variable {}
\".format(data['name'])\n return (\"The {expression} is not a list, but you used \"\n \"it in the iteration on line {line}. You should only iterate \"\n \"over sequences like lists.\"\n ).format(line=data['position']['line'], expression=expression)\n elif issue == 'Iterating over empty list':\n if 'name' not in data or data['name'] is None:\n expression = \"expression\"\n else:\n expression = \"variable {}
\".format(data['name'])\n return (\"The {expression} was set as an empty list, \"\n \"and then you attempted to use it in an iteration on line \"\n \"{line}. You should only iterate over non-empty lists.\"\n ).format(line=data['position']['line'], expression=expression)\n elif issue == 'Incompatible types':\n op = OPERATION_DESCRIPTION.get(data['operation'].__class__,\n str(data['operation']))\n left = data['left'].singular_name\n right = data['right'].singular_name\n line = data['position']['line']\n return (\"You used {op} operation with {left} and {right} on line \"\n \"{line}. But you can't do that with that operator. Make \"\n \"sure both sides of the operator are the right type.\"\n ).format(op=op, left=left, right=right, line=line)\n elif issue == \"Parameter Type Mismatch\":\n name = data['parameter_name']\n parameter = data['parameter'].singular_name\n argument = data['argument'].singular_name\n line = data['position']['line']\n return (\"You defined the parameter {name}
on line {line} \"\n \"as {parameter}. However, the argument passed to that parameter \"\n \"was {argument}. The formal parameter type must match the argument's type.\"\n ).format(name=name, argument=argument, parameter=parameter, line=line)\n elif issue == 'Read out of scope':\n return (\"You attempted to read a variable from a different scope on \"\n \"line {line}. You should only use variables inside the \"\n \"function they were declared in.\"\n ).format(line=data['position']['line'])\n return False\n\n\n'''\nTODO: Finish these checks\n\"Empty Body\": [], # Any use of pass on its own\n\"Malformed Conditional\": [], # An if/else with empty else or if\n\"Unnecessary Pass\": [], # Any use of pass\n\"Append to non-list\": [], # Attempted to use the append method on a non-list\n\"Used iteration list\": [], #\n\"Unused iteration variable\": [], #\n\"Type changes\": [], #\n\"Unknown functions\": [], #\n\"Not a function\": [], # Attempt to call non-function as function\n\"Recursive Call\": [],\n\"Incorrect Arity\": [],\n\"Aliased built-in\": [], #\n\"Method not in Type\": [], # A method was used that didn't exist for that type\n\"Submodule not found\": [],\n\"Module not found\": [],\n\"Else on loop body\": [], # Used an Else on a For or While\n'''\n","src/lib/pedal/tifa/state.py":"def check_trace(state):\n past_types = [state.type]\n for past_state in state.trace:\n past_types.extend(check_trace(past_state))\n return past_types\n\n\nclass State:\n \"\"\"\n A representation of a variable at a particular point in time of the program.\n\n Attributes:\n name (str): The name of the variable, without its scope chain\n trace (list of State): A recursive definition of previous States for\n this State.\n type (Type): The current type of this variable.\n method (str): One of 'store', 'read', (TODO). Indicates the change that\n occurred to this variable at this State.\n position (dict): A Position dictionary indicating where this State\n change occurred in the source code.\n read (str): One of 'yes', 'no', or 'maybe'. Indicates if this variable\n has been read since it was last changed. If merged from a\n diverging path, it is possible that it was \"maybe\" read.\n set (str): One of 'yes', 'no', or 'maybe'. Indicates if this variable\n has been set since it was last read. If merged from a\n diverging path, it is possible that it was \"maybe\" changed.\n over (str): One of 'yes', 'no', or 'maybe'. Indicates if this variable\n has been overwritten since it was last set. 
If merged from a\n diverging path, it is possible that it was \"maybe\" changed.\n over_position (dict): A Position indicating where the State was\n previously set versus when it was overwritten.\n \"\"\"\n\n def __init__(self, name, trace, type, method, position,\n read='maybe', set='maybe', over='maybe', over_position=None):\n self.name = name\n self.trace = trace\n self.type = type\n self.method = method\n self.position = position\n self.over_position = over_position\n self.read = read\n self.set = set\n self.over = over\n\n def copy(self, method, position):\n \"\"\"\n Make a copy of this State, copying this state into the new State's trace\n \"\"\"\n return State(self.name, [self], self.type, method, position,\n self.read, self.set, self.over, self.over_position)\n\n def __str__(self):\n \"\"\"\n Create a string representation of this State.\n \"\"\"\n return \"{method}(r:{read},s:{set},o:{over},{type})\".format(\n method=self.method,\n read=self.read[0],\n set=self.set[0],\n over=self.over[0],\n type=self.type.__class__.__name__\n )\n\n def __repr__(self):\n \"\"\"\n Create a string representation of this State.\n \"\"\"\n return str(self)\n\n def was_type(self, a_type):\n \"\"\"\n Retrieve all the types that this variable took on over its entire\n trace.\n \"\"\"\n past_types = check_trace(self)\n return any(past_type.is_equal(a_type) for past_type in past_types)\n","src/lib/pedal/tifa/tifa.py":"import ast\nfrom pprint import pprint\n\nfrom pedal.report import MAIN_REPORT\n\nfrom pedal.tifa.type_definitions import (UnknownType, RecursedType,\n FunctionType, ClassType, InstanceType,\n NumType, NoneType, BoolType, TupleType,\n ListType, StrType, GeneratorType,\n DictType, ModuleType, SetType,\n # FileType, DayType, TimeType,\n type_from_json, type_to_literal, get_tifa_type,\n LiteralNum, LiteralBool,\n LiteralNone, LiteralStr,\n LiteralTuple)\nfrom pedal.tifa.builtin_definitions import (get_builtin_module, get_builtin_function)\nfrom pedal.tifa.type_operations import (merge_types, are_types_equal,\n VALID_UNARYOP_TYPES, VALID_BINOP_TYPES,\n ORDERABLE_TYPES, INDEXABLE_TYPES)\nfrom pedal.tifa.identifier import Identifier\nfrom pedal.tifa.state import State\nfrom pedal.tifa.messages import _format_message\n\n__all__ = ['Tifa']\n\n\nclass Tifa(ast.NodeVisitor):\n \"\"\"\n TIFA Class for traversing an AST and finding common issues.\n\n Args:\n python_3 (bool): Whether to parse the code in regular PYTHON_3 mode or\n the modified AST that Skulpt uses.\n report (Report): The report object to store data and feedback in. 
If\n left None, defaults to the global MAIN_REPORT.\n \"\"\"\n\n def __init__(self, python_3=True, report=None):\n if report is None:\n report = MAIN_REPORT\n self.report = report\n self._initialize_report()\n\n def _initialize_report(self):\n \"\"\"\n Initialize a successful report with possible set of issues.\n \"\"\"\n self.report['tifa'] = {\n 'success': True,\n 'variables': {},\n 'top_level_variables': {},\n 'issues': {}\n }\n\n def report_issue(self, issue, data=None):\n \"\"\"\n Report the given issue with associated metadata, including the position\n if not explicitly included.\n \"\"\"\n if data is None:\n data = {}\n if 'position' not in data:\n data['position'] = self.locate()\n data['message'] = _format_message(issue, data)\n if issue not in self.report['tifa']['issues']:\n self.report['tifa']['issues'][issue] = []\n self.report['tifa']['issues'][issue].append(data)\n if data['message']:\n self.report.attach(issue, category='Analyzer', tool='TIFA',\n mistake=data)\n\n def locate(self, node=None):\n \"\"\"\n Return a dictionary representing the current location within the\n AST.\n\n Returns:\n Position dict: A dictionary with the fields 'column' and 'line',\n indicating the current position in the source code.\n \"\"\"\n if node is None:\n if self.node_chain:\n node = self.node_chain[-1]\n else:\n node = self.final_node\n return {'column': node.col_offset, 'line': node.lineno}\n\n def process_code(self, code, filename=\"__main__\"):\n \"\"\"\n Processes the AST of the given source code to generate a report.\n\n Args:\n code (str): The Python source code\n filename (str): The filename of the source code (defaults to __main__)\n Returns:\n Report: The successful or successful report object\n \"\"\"\n # Code\n self.source = code.split(\"\\n\") if code else []\n filename = filename\n\n # Attempt parsing - might fail!\n try:\n ast_tree = ast.parse(code, filename)\n except Exception as error:\n self.report['tifa']['success'] = False\n self.report['tifa']['error'] = error\n self.report.attach('tifa_error', category='Analyzer', tool='TIFA',\n mistake={\n 'message': \"Could not parse code\",\n 'error': error\n })\n return self.report['tifa']\n try:\n return self.process_ast(ast_tree)\n except Exception as error:\n self.report['tifa']['success'] = False\n self.report['tifa']['error'] = error\n self.report.attach('tifa_error', category='Analyzer', tool='TIFA',\n mistake={\n 'message': \"Could not process code: \"+str(error),\n 'error': error\n })\n return self.report['tifa']\n\n def process_ast(self, ast_tree):\n \"\"\"\n Given an AST, actually performs the type and flow analyses to return a \n report.\n\n Args:\n ast (Ast): The AST object\n Returns:\n Report: The final report object created (also available as a field).\n \"\"\"\n self._reset()\n # Traverse every node\n self.visit(ast_tree)\n\n # Check afterwards\n self.report['tifa']['variables'] = self.name_map\n self._finish_scope()\n\n # Collect top level variables\n self._collect_top_level_variables()\n # print(self.report['variables'])\n\n return self.report['tifa']\n\n def _collect_top_level_variables(self):\n \"\"\"\n Walk through the variables and add any at the top level to the\n top_level_variables field of the report.\n \"\"\"\n top_level_variables = self.report['tifa']['top_level_variables']\n main_path_vars = self.name_map[self.path_chain[0]]\n for full_name in main_path_vars:\n split_name = full_name.split(\"/\")\n if len(split_name) == 2 and split_name[0] == str(self.scope_chain[0]):\n name = split_name[1]\n 
top_level_variables[name] = main_path_vars[full_name]\n\n def _reset(self):\n \"\"\"\n Reinitialize fields for maintaining the system\n \"\"\"\n # Unique Global IDs\n self.path_id = 0\n self.scope_id = 0\n self.ast_id = 0\n\n # Human readable names\n self.path_names = ['*Module']\n self.scope_names = ['*Module']\n self.node_chain = []\n\n # Complete record of all Names\n self.scope_chain = [self.scope_id]\n self.path_chain = [self.path_id]\n self.name_map = {}\n self.name_map[self.path_id] = {}\n self.definition_chain = []\n self.path_parents = {}\n self.final_node = None\n self.class_scopes = {}\n\n def find_variable_scope(self, name):\n \"\"\"\n Walk through this scope and all enclosing scopes, finding the relevant\n identifier given by `name`.\n\n Args:\n name (str): The name of the variable\n Returns:\n Identifier: An Identifier for the variable, which could potentially\n not exist.\n \"\"\"\n for scope_level, scope in enumerate(self.scope_chain):\n for path_id in self.path_chain:\n path = self.name_map[path_id]\n full_name = \"/\".join(map(str, self.scope_chain[scope_level:])) + \"/\" + name\n if full_name in path:\n is_root_scope = (scope_level == 0)\n return Identifier(True, is_root_scope,\n full_name, path[full_name])\n\n return Identifier(False)\n\n def find_variable_out_of_scope(self, name):\n \"\"\"\n Walk through every scope and determine if this variable can be found\n elsewhere (which would be an issue).\n\n Args:\n name (str): The name of the variable\n Returns:\n Identifier: An Identifier for the variable, which could potentially\n not exist.\n \"\"\"\n for path in self.name_map.values():\n for full_name in path:\n unscoped_name = full_name.split(\"/\")[-1]\n if name == unscoped_name:\n return Identifier(True, False, unscoped_name, path[full_name])\n return Identifier(False)\n\n def find_path_parent(self, path_id, name):\n if name in self.name_map[path_id]:\n return Identifier(True, state=self.name_map[path_id][name])\n else:\n path_parent = self.path_parents.get(path_id)\n if path_parent is None:\n return Identifier(False)\n else:\n return self.find_path_parent(path_parent, name)\n\n def _finish_scope(self):\n \"\"\"\n Walk through all the variables present in this scope and ensure that\n they have been read and not overwritten.\n \"\"\"\n path_id = self.path_chain[0]\n for name in self.name_map[path_id]:\n if Tifa.in_scope(name, self.scope_chain):\n state = self.name_map[path_id][name]\n if state.over == 'yes':\n position = state.over_position\n self.report_issue('Overwritten Variable',\n {'name': state.name, 'position': position})\n if state.read == 'no':\n self.report_issue('Unused Variable',\n {'name': state.name, 'type': state.type,\n 'position': state.position})\n\n def visit(self, node):\n \"\"\"\n Process this node by calling its appropriate visit_*\n\n Args:\n node (AST): The node to visit\n Returns:\n Type: The type calculated during the visit.\n \"\"\"\n # Start processing the node\n self.node_chain.append(node)\n self.ast_id += 1\n\n # Actions after return?\n if len(self.scope_chain) > 1:\n return_state = self.find_variable_scope(\"*return\")\n if return_state.exists and return_state.in_scope:\n if return_state.state.set == \"yes\":\n self.report_issue(\"Action after return\")\n\n # No? 
All good, let's enter the node\n self.final_node = node\n result = ast.NodeVisitor.visit(self, node)\n\n # Pop the node out of the chain\n self.ast_id -= 1\n self.node_chain.pop()\n\n # If a node failed to return something, return the UNKNOWN TYPE\n if result is None:\n return UnknownType()\n else:\n return result\n\n def _visit_nodes(self, nodes):\n \"\"\"\n Visit all the nodes in the given list.\n\n Args:\n nodes (list): A list of values, of which any AST nodes will be\n visited.\n \"\"\"\n for node in nodes:\n if isinstance(node, ast.AST):\n self.visit(node)\n\n def walk_targets(self, targets, type, walker):\n \"\"\"\n Iterate through the targets and call the given function on each one.\n\n Args:\n targets (list of Ast nodes): A list of potential targets to be\n traversed.\n type (Type): The given type to be unraveled and applied to the\n targets.\n walker (Ast Node, Type -> None): A function that will process\n each target and unravel the type.\n \"\"\"\n for target in targets:\n walker(target, type)\n\n def _walk_target(self, target, type):\n \"\"\"\n Recursively apply the type to the target\n\n Args:\n target (Ast): The current AST node to process\n type (Type): The type to apply to this node\n \"\"\"\n if isinstance(target, ast.Name):\n self.store_iter_variable(target.id, type, self.locate(target))\n return target.id\n elif isinstance(target, (ast.Tuple, ast.List)):\n result = None\n for i, elt in enumerate(target.elts):\n elt_type = type.index(LiteralNum(i))\n potential_name = self._walk_target(elt, elt_type)\n if potential_name is not None and result is None:\n result = potential_name\n return result\n \n def visit_AnnAssign(self, node):\n \"\"\"\n TODO: Implement!\n \"\"\"\n pass\n\n def visit_Assign(self, node):\n \"\"\"\n Simple assignment statement:\n __targets__ = __value__\n\n Args:\n node (AST): An Assign node\n Returns:\n None\n \"\"\"\n # Handle value\n value_type = self.visit(node.value)\n # Handle targets\n self._visit_nodes(node.targets)\n\n # TODO: Properly handle assignments with subscripts\n def action(target, type):\n if isinstance(target, ast.Name):\n self.store_variable(target.id, type)\n elif isinstance(target, (ast.Tuple, ast.List)):\n for i, elt in enumerate(target.elts):\n eltType = type.index(LiteralNum(i))\n action(elt, eltType)\n elif isinstance(target, ast.Subscript):\n pass\n elif isinstance(target, ast.Attribute):\n left_hand_type = self.visit(target.value)\n if isinstance(left_hand_type, InstanceType):\n left_hand_type.add_attr(target.attr, type)\n # TODO: Otherwise we attempted to assign to a non-instance\n # TODO: Handle minor type changes (e.g., appending to an inner list)\n\n self.walk_targets(node.targets, value_type, action)\n\n def visit_AugAssign(self, node):\n # Handle value\n right = self.visit(node.value)\n # Handle target\n left = self.visit(node.target)\n # Target is always a Name, Subscript, or Attribute\n name = self.identify_caller(node.target)\n\n # Handle operation\n self.load_variable(name)\n if isinstance(left, UnknownType) or isinstance(right, UnknownType):\n return UnknownType()\n elif type(node.op) in VALID_BINOP_TYPES:\n op_lookup = VALID_BINOP_TYPES[type(node.op)]\n if type(left) in op_lookup:\n op_lookup = op_lookup[type(left)]\n if type(right) in op_lookup:\n op_lookup = op_lookup[type(right)]\n result_type = op_lookup(left, right)\n self.store_variable(name, result_type)\n return result_type\n\n self.report_issue(\"Incompatible types\",\n {\"left\": left, \"right\": right,\n \"operation\": node.op})\n\n def 
visit_Attribute(self, node):\n # Handle value\n value_type = self.visit(node.value)\n # Handle ctx\n # TODO: Handling contexts\n # Handle attr\n return value_type.load_attr(node.attr, self, node.value, self.locate())\n\n def visit_BinOp(self, node):\n # Handle left and right\n left = self.visit(node.left)\n right = self.visit(node.right)\n\n # Handle operation\n if isinstance(left, UnknownType) or isinstance(right, UnknownType):\n return UnknownType()\n elif type(node.op) in VALID_BINOP_TYPES:\n op_lookup = VALID_BINOP_TYPES[type(node.op)]\n if type(left) in op_lookup:\n op_lookup = op_lookup[type(left)]\n if type(right) in op_lookup:\n op_lookup = op_lookup[type(right)]\n return op_lookup(left, right)\n\n self.report_issue(\"Incompatible types\",\n {\"left\": left, \"right\": right,\n \"operation\": node.op})\n return UnknownType()\n\n def visit_Bool(self, node):\n return BoolType()\n\n def visit_BoolOp(self, node):\n # Handle left and right\n values = []\n for value in node.values:\n values.append(self.visit(value))\n\n # TODO: Truthiness is not supported! Probably need a Union type\n # TODO: Literals used as truthy value\n\n # Handle operation\n return BoolType()\n\n def visit_Call(self, node):\n # Handle func part (Name or Attribute)\n function_type = self.visit(node.func)\n # TODO: Need to grab the actual type in some situations\n callee = self.identify_caller(node)\n\n # Handle args\n arguments = [self.visit(arg) for arg in node.args] if node.args else []\n\n # TODO: Handle keywords\n # TODO: Handle starargs\n # TODO: Handle kwargs\n if isinstance(function_type, FunctionType):\n # Test if we have called this definition before\n if function_type.definition not in self.definition_chain:\n self.definition_chain.append(function_type.definition)\n # Function invocation\n result = function_type.definition(self, function_type, callee,\n arguments, self.locate())\n self.definition_chain.pop()\n return result\n else:\n self.report_issue(\"Recursive Call\", {\"name\": callee})\n elif isinstance(function_type, ClassType):\n constructor = function_type.get_constructor().definition\n self.definition_chain.append(constructor)\n result = constructor(self, constructor, callee, arguments, self.locate())\n self.definition_chain.pop()\n if '__init__' in function_type.fields:\n initializer = function_type.fields['__init__']\n if isinstance(initializer, FunctionType):\n self.definition_chain.append(initializer)\n initializer.definition(self, initializer, result, [result] + arguments, self.locate())\n self.definition_chain.pop()\n return result\n else:\n self.report_issue(\"Not a function\", {\"name\": callee})\n return UnknownType()\n\n def visit_ClassDef(self, node):\n class_name = node.name\n new_class_type = ClassType(class_name)\n self.store_variable(class_name, new_class_type)\n # TODO: Define a new scope definition that executes the body\n # TODO: find __init__, execute that\n definitions_scope = self.scope_chain[:]\n class_scope = Tifa.NewScope(self, definitions_scope, class_type=new_class_type)\n with class_scope:\n self.generic_visit(node)\n\n def visit_Compare(self, node):\n # Handle left and right\n left = self.visit(node.left)\n comparators = [self.visit(compare) for compare in node.comparators]\n\n # Handle ops\n for op, right in zip(node.ops, comparators):\n if isinstance(op, (ast.Eq, ast.NotEq, ast.Is, ast.IsNot)):\n continue\n elif isinstance(op, (ast.Lt, ast.LtE, ast.GtE, ast.Gt)):\n if are_types_equal(left, right):\n if isinstance(left, ORDERABLE_TYPES):\n continue\n elif isinstance(op, 
(ast.In, ast.NotIn)):\n if isinstance(right, INDEXABLE_TYPES):\n continue\n self.report_issue(\"Incompatible types\",\n {\"left\": left, \"right\": right,\n \"operation\": op})\n return BoolType()\n\n def _visit_collection_loop(self, node):\n # Handle the iteration list\n iter = node.iter\n iter_list_name = None\n if isinstance(iter, ast.Name):\n iter_list_name = iter.id\n if iter_list_name == \"___\":\n self.report_issue(\"Unconnected blocks\",\n {\"position\": self.locate(iter)})\n state = self.iterate_variable(iter_list_name, self.locate(iter))\n iter_type = state.type\n else:\n iter_type = self.visit(iter)\n\n if iter_type.is_empty():\n self.report_issue(\"Iterating over empty list\",\n {\"name\": iter_list_name,\n \"position\": self.locate(iter)})\n\n if not isinstance(iter_type, INDEXABLE_TYPES):\n self.report_issue(\"Iterating over Non-list\",\n {\"name\": iter_list_name,\n \"position\": self.locate(iter)})\n\n iter_subtype = iter_type.index(LiteralNum(0))\n\n # Handle the iteration variable\n iter_variable_name = self._walk_target(node.target, iter_subtype)\n\n if iter_variable_name and iter_list_name:\n if iter_variable_name == iter_list_name:\n self.report_issue(\"Iteration Problem\",\n {\"name\": iter_variable_name,\n \"position\": self.locate(node.target)})\n\n def visit_comprehension(self, node):\n self._visit_collection_loop(node)\n # Handle ifs, unless they're blank (None in Skulpt :)\n if node.ifs:\n self.visit_statements(node.ifs)\n\n def visit_Dict(self, node):\n \"\"\"\n Three types of dictionaries\n - empty\n - uniform type\n - record\n \"\"\"\n type = DictType()\n if not node.keys:\n type.empty = True\n else:\n type.empty = False\n all_literals = True\n keys, values, literals = [], [], []\n for key, value in zip(node.keys, node.values):\n literal = self.get_literal(key)\n key, value = self.visit(key), self.visit(value)\n values.append(value)\n keys.append(key)\n if literal is not None:\n literals.append(literal)\n else:\n all_literals = False\n\n if all_literals:\n type.literals = literals\n type.values = values\n else:\n type.keys = key\n type.values = value\n return type\n\n def visit_DictComp(self, node):\n # TODO: Handle comprehension scope\n for generator in node.generators:\n self.visit(generator)\n keys = self.visit(node.key)\n values = self.visit(node.value)\n return DictType(keys=keys, values=values)\n\n def visit_For(self, node):\n self._visit_collection_loop(node)\n # Handle the bodies\n self.visit_statements(node.body)\n self.visit_statements(node.orelse)\n\n def visit_FunctionDef(self, node):\n # Name\n function_name = node.name\n position = self.locate()\n definitions_scope = self.scope_chain[:]\n\n def definition(tifa, call_type, call_name, parameters, call_position):\n function_scope = Tifa.NewScope(self, definitions_scope)\n with function_scope:\n # Process arguments\n args = node.args.args\n if len(args) != len(parameters):\n self.report_issue('Incorrect Arity', {\"position\": position})\n # TODO: Handle special types of parameters\n for arg, parameter in zip(args, parameters):\n name = arg.arg\n if arg.annotation:\n self.visit(arg.annotation)\n annotation = get_tifa_type(arg.annotation, {})\n # TODO: Check that arg.type and parameter type match!\n if not are_types_equal(annotation, parameter):\n self.report_issue(\"Parameter Type Mismatch\",\n {\"parameter\": annotation, \"parameter_name\": name,\n \"argument\": parameter})\n if parameter is not None:\n parameter = parameter.clone_mutably()\n self.store_variable(name, parameter, position)\n if 
len(args) < len(parameters):\n for undefined_parameter in parameters[len(args):]:\n self.store_variable(name, UnknownType(), position)\n self.visit_statements(node.body)\n return_state = self.find_variable_scope(\"*return\")\n return_value = NoneType()\n # TODO: Figure out if we are not returning something when we should\n # If the pseudo variable exists, we load it and get its type\n if return_state.exists and return_state.in_scope:\n return_state = self.load_variable(\"*return\", call_position)\n return_value = return_state.type\n if node.returns:\n #self.visit(node.returns)\n returns = get_tifa_type(node.returns, {})\n if not are_types_equal(return_value, returns):\n self.report_issue(\"Multiple Return Types\",\n {\"expected\": returns.singular_name,\n \"actual\": return_value.singular_name,\n \"position\": return_state.position})\n return return_value\n\n function = FunctionType(definition=definition, name=function_name)\n self.store_variable(function_name, function)\n return function\n\n def visit_GeneratorExp(self, node):\n # TODO: Handle comprehension scope\n for generator in node.generators:\n self.visit(generator)\n return GeneratorType(self.visit(node.elt))\n\n def visit_If(self, node):\n # Visit the conditional\n self.visit(node.test)\n\n if len(node.orelse) == 1 and isinstance(node.orelse[0], ast.Pass):\n self.report_issue(\"Malformed Conditional\")\n elif len(node.body) == 1 and isinstance(node.body[0], ast.Pass):\n if node.orelse:\n self.report_issue(\"Malformed Conditional\")\n\n # Visit the bodies\n this_path_id = self.path_chain[0]\n if_path = Tifa.NewPath(self, this_path_id, \"i\")\n with if_path:\n for statement in node.body:\n self.visit(statement)\n else_path = Tifa.NewPath(self, this_path_id, \"e\")\n with else_path:\n for statement in node.orelse:\n self.visit(statement)\n\n # Combine two paths into one\n # Check for any names that are on the IF path\n self.merge_paths(this_path_id, if_path.id, else_path.id)\n\n def visit_IfExp(self, node):\n # Visit the conditional\n self.visit(node.test)\n\n # Visit the body\n body = self.visit(node.body)\n\n # Visit the orelse\n orelse = self.visit(node.orelse)\n\n if are_types_equal(body, orelse):\n return body\n\n # TODO: Union type?\n return UnknownType()\n\n def visit_Import(self, node):\n # Handle names\n for alias in node.names:\n asname = alias.asname or alias.name\n module_type = self.load_module(alias.name)\n self.store_variable(asname, module_type)\n\n def visit_ImportFrom(self, node):\n # Handle names\n for alias in node.names:\n if node.module is None:\n asname = alias.asname or alias.name\n module_type = self.load_module(alias.name)\n else:\n module_name = node.module\n asname = alias.asname or alias.name\n module_type = self.load_module(module_name)\n name_type = module_type.load_attr(alias.name, self,\n callee_position=self.locate())\n self.store_variable(asname, name_type)\n\n def visit_Lambda(self, node):\n # Name\n position = self.locate()\n definitions_scope = self.scope_chain[:]\n\n def definition(tifa, call_type, call_name, parameters, call_position):\n function_scope = Tifa.NewScope(self, definitions_scope)\n with function_scope:\n # Process arguments\n args = node.args.args\n if len(args) != len(parameters):\n self.report_issue('Incorrect Arity', {\"position\": position})\n # TODO: Handle special types of parameters\n for arg, parameter in zip(args, parameters):\n name = arg.arg\n if parameter is not None:\n parameter = parameter.clone_mutably()\n self.store_variable(name, parameter, position)\n if len(args) 
< len(parameters):\n for undefined_parameter in parameters[len(args):]:\n self.store_variable(name, UnknownType(), position)\n return_value = self.visit(node.body)\n return return_value\n\n return FunctionType(definition=definition)\n\n def visit_List(self, node):\n type = ListType()\n if node.elts:\n type.empty = False\n # TODO: confirm homogenous subtype\n for elt in node.elts:\n type.subtype = self.visit(elt)\n else:\n type.empty = True\n return type\n\n def visit_ListComp(self, node):\n # TODO: Handle comprehension scope\n for generator in node.generators:\n self.visit(generator)\n return ListType(self.visit(node.elt))\n\n def visit_Name(self, node):\n name = node.id\n if name == \"___\":\n self.report_issue(\"Unconnected blocks\")\n if isinstance(node.ctx, ast.Load):\n if name == \"True\" or name == \"False\":\n return BoolType()\n elif name == \"None\":\n return NoneType()\n else:\n variable = self.find_variable_scope(name)\n builtin = get_builtin_function(name)\n if not variable.exists and builtin:\n return builtin\n else:\n state = self.load_variable(name)\n return state.type\n else:\n variable = self.find_variable_scope(name)\n if variable.exists:\n return variable.state.type\n else:\n return UnknownType()\n\n def visit_Num(self, node):\n return NumType()\n\n def visit_Return(self, node):\n if len(self.scope_chain) == 1:\n self.report_issue(\"Return outside function\")\n if node.value is not None:\n self.return_variable(self.visit(node.value))\n else:\n self.return_variable(NoneType())\n\n def visit_SetComp(self, node):\n # TODO: Handle comprehension scope\n for generator in node.generators:\n self.visit(generator)\n return SetType(self.visit(node.elt))\n\n def visit_statements(self, nodes):\n # TODO: Check for pass in the middle of a series of statement\n if any(isinstance(node, ast.Pass) for node in nodes):\n pass\n return [self.visit(statement) for statement in nodes]\n\n def visit_Str(self, node):\n if node.s == \"\":\n return StrType(True)\n else:\n return StrType(False)\n\n def visit_Subscript(self, node):\n # Handle value\n value_type = self.visit(node.value)\n # Handle slice\n if isinstance(node.slice, ast.Index):\n literal = self.get_literal(node.slice.value)\n if literal is None:\n dynamic_literal = type_to_literal(self.visit(node.slice.value))\n return value_type.index(dynamic_literal)\n else:\n return value_type.index(literal)\n elif isinstance(node.slice, ast.Slice):\n if node.slice.lower is not None:\n self.visit(node.slice.lower)\n if node.slice.upper is not None:\n self.visit(node.slice.upper)\n if node.slice.step is not None:\n self.visit(node.slice.step)\n return value_type\n\n def visit_Tuple(self, node):\n type = TupleType()\n if not node.elts:\n type.empty = True\n type.subtypes = []\n else:\n type.empty = False\n # TODO: confirm homogenous subtype\n type.subtypes = [self.visit(elt) for elt in node.elts]\n return type\n\n def visit_UnaryOp(self, node):\n # Handle operand\n operand = self.visit(node.operand)\n\n if isinstance(node.op, ast.Not):\n return BoolType()\n elif isinstance(operand, UnknownType):\n return UnknownType()\n elif type(node.op) in VALID_UNARYOP_TYPES:\n op_lookup = VALID_UNARYOP_TYPES[type(node.op)]\n if type(operand) in op_lookup:\n return op_lookup[type(operand)]()\n return UnknownType()\n\n def visit_While(self, node):\n # Visit conditional\n self.visit(node.test)\n\n # Visit the bodies\n this_path_id = self.path_id\n # One path is that we never enter the body\n empty_path = Tifa.NewPath(self, this_path_id, \"e\")\n with empty_path:\n 
pass\n # Another path is that we loop through the body and check the test again\n body_path = Tifa.NewPath(self, this_path_id, \"w\")\n with body_path:\n for statement in node.body:\n self.visit(statement)\n # Revisit conditional\n self.visit(node.test)\n # If there's else bodies (WEIRD) then we should check them afterwards\n if node.orelse:\n self.report_issue(\"Else on loop body\")\n for statement in node.orelse:\n self.visit(statement)\n\n # Combine two paths into one\n # Check for any names that are on the IF path\n self.merge_paths(this_path_id, body_path.id, empty_path.id)\n\n def visit_With(self, node):\n for item in node.items:\n type_value = self.visit(item.context_expr)\n self.visit(item.optional_vars)\n self._walk_target(item.optional_vars, type_value)\n # Handle the bodies\n self.visit_statements(node.body)\n\n def _scope_chain_str(self, name=None):\n \"\"\"\n Convert the current scope chain to a string representation (divided \n by \"/\").\n\n Returns:\n str: String representation of the scope chain.\n \"\"\"\n if name:\n return \"/\".join(map(str, self.scope_chain)) + \"/\" + name\n else:\n return \"/\".join(map(str, self.scope_chain))\n\n def identify_caller(self, node):\n \"\"\"\n Figures out the variable that was used to kick off this call,\n which is almost always the relevant Name to track as being updated.\n If the origin wasn't a Name, nothing will need to be updated so None\n is returned instead.\n\n TODO: Is this sufficient?\n\n Args:\n node (AST): An AST node\n Returns:\n str or None: The name of the variable or None if no origin could\n be found.\n \"\"\"\n if isinstance(node, ast.Name):\n return node.id\n elif isinstance(node, ast.Call):\n return self.identify_caller(node.func)\n elif isinstance(node, (ast.Attribute, ast.Subscript)):\n return self.identify_caller(node.value)\n return None\n\n def iterate_variable(self, name, position=None):\n \"\"\"\n Update the variable by iterating through it - this doesn't do anything\n fancy yet.\n \"\"\"\n return self.load_variable(name, position)\n\n def store_iter_variable(self, name, type, position=None):\n state = self.store_variable(name, type, position)\n state.read = 'yes'\n return state\n\n def return_variable(self, type):\n\n return self.store_variable(\"*return\", type)\n\n def append_variable(self, name, type, position=None):\n return self.store_variable(name, type, position)\n\n def store_variable(self, name, type, position=None):\n \"\"\"\n Update the variable with the given name to now have the new type.\n\n Args:\n name (str): The unqualified name of the variable. 
The variable will\n be assumed to be in the current scope.\n type (Type): The new type of this variable.\n Returns:\n State: The new state of the variable.\n \"\"\"\n if position is None:\n position = self.locate()\n full_name = self._scope_chain_str(name)\n current_path = self.path_chain[0]\n variable = self.find_variable_scope(name)\n if not variable.exists:\n # Create a new instance of the variable on the current path\n new_state = State(name, [], type, 'store', position,\n read='no', set='yes', over='no')\n self.name_map[current_path][full_name] = new_state\n else:\n new_state = self.trace_state(variable.state, \"store\", position)\n if not variable.in_scope:\n self.report_issue(\"Write out of scope\", {'name': name})\n # Type change?\n if not are_types_equal(type, variable.state.type):\n self.report_issue(\"Type changes\",\n {'name': name, 'old': variable.state.type,\n 'new': type, 'position': position})\n new_state.type = type\n # Overwritten?\n if variable.state.set == 'yes' and variable.state.read == 'no':\n new_state.over_position = position\n new_state.over = 'yes'\n else:\n new_state.set = 'yes'\n new_state.read = 'no'\n self.name_map[current_path][full_name] = new_state\n # If this is a class scope...\n current_scope = self.scope_chain[0]\n if current_scope in self.class_scopes:\n self.class_scopes[current_scope].add_attr(name, new_state.type)\n return new_state\n\n def load_variable(self, name, position=None):\n \"\"\"\n Retrieve the variable with the given name.\n\n Args:\n name (str): The unqualified name of the variable. If the variable is\n not found in the current scope or an enclosing sope, all\n other scopes will be searched to see if it was read out\n of scope.\n Returns:\n State: The current state of the variable.\n \"\"\"\n full_name = self._scope_chain_str(name)\n current_path = self.path_chain[0]\n variable = self.find_variable_scope(name)\n if position is None:\n position = self.locate()\n if not variable.exists:\n out_of_scope_var = self.find_variable_out_of_scope(name)\n # Create a new instance of the variable on the current path\n if out_of_scope_var.exists:\n self.report_issue(\"Read out of scope\", {'name': name})\n else:\n self.report_issue(\"Initialization Problem\", {'name': name})\n new_state = State(name, [], UnknownType(), 'load', position,\n read='yes', set='no', over='no')\n self.name_map[current_path][full_name] = new_state\n else:\n new_state = self.trace_state(variable.state, \"load\", position)\n if variable.state.set == 'no':\n self.report_issue(\"Initialization Problem\", {'name': name})\n if variable.state.set == 'maybe':\n self.report_issue(\"Possible Initialization Problem\", {'name': name})\n new_state.read = 'yes'\n if not variable.in_scope:\n self.name_map[current_path][variable.scoped_name] = new_state\n else:\n self.name_map[current_path][full_name] = new_state\n return new_state\n\n def load_module(self, chain):\n \"\"\"\n Finds the module in the set of available modules.\n\n Args:\n chain (str): A chain of module imports (e.g., \"matplotlib.pyplot\")\n Returns:\n ModuleType: The specific module with its members, or an empty\n module type.\n \"\"\"\n module_names = chain.split('.')\n potential_module = get_builtin_module(module_names[0])\n if potential_module is not None:\n base_module = potential_module\n for module in module_names:\n if (isinstance(base_module, ModuleType) and\n module in base_module.submodules):\n base_module = base_module.submodules[module]\n else:\n self.report_issue(\"Module not found\", {\"name\": chain})\n 
return base_module\n else:\n try:\n actual_module = __import__(chain, globals(), {},\n ['_tifa_definitions'])\n definitions = actual_module._tifa_definitions()\n return type_from_json(definitions)\n except Exception as e:\n self.report_issue(\"Module not found\",\n {\"name\": chain, \"error\": str(e)})\n return ModuleType()\n\n def combine_states(self, left, right):\n state = State(left.name, [left], left.type, 'branch', self.locate(),\n read=left.read, set=left.set, over=left.over,\n over_position=left.over_position)\n if right is None:\n state.read = 'no' if left.read == 'no' else 'maybe'\n state.set = 'no' if left.set == 'no' else 'maybe'\n state.over = 'no' if left.over == 'no' else 'maybe'\n else:\n if not are_types_equal(left.type, right.type):\n self.report_issue(\"Type changes\", {'name': left.name,\n 'old': left.type,\n 'new': right.type})\n state.read = Tifa.match_rso(left.read, right.read)\n state.set = Tifa.match_rso(left.set, right.set)\n state.over = Tifa.match_rso(left.over, right.over)\n if left.over == 'no':\n state.over_position = right.over_position\n state.trace.append(right)\n return state\n\n def merge_paths(self, parent_path_id, left_path_id, right_path_id):\n \"\"\"\n Combines any variables on the left and right path into the parent\n name space.\n\n Args:\n parent_path_id (int): The parent path of the left and right branches\n left_path_id (int): One of the two paths\n right_path_id (int): The other of the two paths.\n \"\"\"\n # Combine two paths into one\n # Check for any names that are on the IF path\n for left_name in self.name_map[left_path_id]:\n left_state = self.name_map[left_path_id][left_name]\n right_identifier = self.find_path_parent(right_path_id, left_name)\n if right_identifier.exists:\n # Was on both IF and ELSE path\n right_state = right_identifier.state\n else:\n # Was only on IF path, potentially on the parent path\n right_state = self.name_map[parent_path_id].get(left_name)\n combined = self.combine_states(left_state, right_state)\n self.name_map[parent_path_id][left_name] = combined\n # Check for names that are on the ELSE path but not the IF path\n for right_name in self.name_map[right_path_id]:\n if right_name not in self.name_map[left_path_id]:\n right_state = self.name_map[right_path_id][right_name]\n # Potentially on the parent path\n parent_state = self.name_map[parent_path_id].get(right_name)\n combined = self.combine_states(right_state, parent_state)\n self.name_map[parent_path_id][right_name] = combined\n\n def trace_state(self, state, method, position):\n \"\"\"\n Makes a copy of the given state with the given method type.\n\n Args:\n state (State): The state to copy (as in, we trace a copy of it!)\n method (str): The operation being applied to the state.\n Returns:\n State: The new State\n \"\"\"\n return state.copy(method, position)\n\n @staticmethod\n def in_scope(full_name, scope_chain):\n \"\"\"\n Determine if the fully qualified variable name is in the given scope\n chain.\n\n Args:\n full_name (str): A fully qualified variable name\n scope_chain (list): A representation of a scope chain.\n Returns:\n bool: Whether the variable lives in this scope\n \"\"\"\n # Get this entity's full scope chain\n name_scopes = full_name.split(\"/\")[:-1]\n # against the reverse scope chain\n checking_scopes = [str(s) for s in scope_chain[::-1]]\n return name_scopes == checking_scopes\n\n @staticmethod\n def match_rso(left, right):\n if left == right:\n return left\n else:\n return \"maybe\"\n\n def get_literal(self, node):\n if isinstance(node, 
ast.Num):\n return LiteralNum(node.n)\n elif isinstance(node, ast.Str):\n return LiteralStr(node.s)\n elif isinstance(node, ast.Tuple):\n values = []\n for elt in node.elts:\n subvalue = self.get_literal(elt)\n if subvalue is not None:\n values.append(subvalue)\n else:\n return None\n return LiteralTuple(values)\n elif isinstance(node, ast.Name):\n if node.id == \"None\":\n return LiteralNone()\n elif node.id == \"False\":\n return LiteralBool(False)\n elif node.id == \"True\":\n return LiteralBool(True)\n return None\n\n class NewPath:\n \"\"\"\n Context manager for entering and leaving execution paths (e.g., if\n statements).)\n\n Args:\n tifa (Tifa): The tifa instance, so we can modify some of its\n properties that track variables and paths.\n origin_path (int): The path ID parent to this one.\n name (str): The symbolic name of this path, typically 'i' for an IF\n body and 'e' for ELSE body.\n\n Fields:\n id (int): The path ID of this path\n \"\"\"\n\n def __init__(self, tifa, origin_path, name):\n self.tifa = tifa\n self.name = name\n self.origin_path = origin_path\n self.id = None\n\n def __enter__(self):\n self.tifa.path_id += 1\n self.id = self.tifa.path_id\n self.tifa.path_names.append(str(self.id) + self.name)\n self.tifa.path_chain.insert(0, self.id)\n self.tifa.name_map[self.id] = {}\n self.tifa.path_parents[self.id] = self.origin_path\n\n def __exit__(self, type, value, traceback):\n self.tifa.path_names.pop()\n self.tifa.path_chain.pop(0)\n\n class NewScope:\n \"\"\"\n Context manager for entering and leaving scopes (e.g., inside of\n function calls).\n\n Args:\n tifa (Tifa): The tifa instance, so we can modify some of its\n properties that track variables and paths.\n definitions_scope_chain (list of int): The scope chain of the\n definition\n \"\"\"\n\n def __init__(self, tifa, definitions_scope_chain, class_type=None):\n self.tifa = tifa\n self.definitions_scope_chain = definitions_scope_chain\n self.class_type = class_type\n\n def __enter__(self):\n # Manage scope\n self.old_scope = self.tifa.scope_chain[:]\n # Move to the definition's scope chain\n self.tifa.scope_chain = self.definitions_scope_chain[:]\n # And then enter its body's new scope\n self.tifa.scope_id += 1\n self.tifa.scope_chain.insert(0, self.tifa.scope_id)\n # Register as class potentially\n if self.class_type is not None:\n self.class_type.scope_id = self.tifa.scope_id\n self.tifa.class_scopes[self.tifa.scope_id] = self.class_type\n\n def __exit__(self, type, value, traceback):\n # Finish up the scope\n self.tifa._finish_scope()\n # Leave the body\n self.tifa.scope_chain.pop(0)\n # Restore the scope\n self.tifa.scope_chain = self.old_scope\n","src/lib/pedal/tifa/type_definitions.py":"import ast\n\n\ndef are_literals_equal(first, second):\n if first is None or second is None:\n return False\n elif not isinstance(first, type(second)):\n return False\n else:\n if isinstance(first, LiteralTuple):\n if len(first.value) != len(second.value):\n return False\n for l, s in zip(first.value, second.value):\n if not are_literals_equal(l, s):\n return False\n return True\n elif not isinstance(first, LiteralValue):\n return True\n else:\n return first.value == second.value\n\n\nclass LiteralValue:\n \"\"\"\n A special literal representation of a value, used to represent access on\n certain container types.\n \"\"\"\n\n def __init__(self, value):\n self.value = value\n\n\nclass LiteralNum(LiteralValue):\n \"\"\"\n Used to capture indexes of containers.\n \"\"\"\n\n def type(self):\n return NumType()\n\n\nclass 
LiteralBool(LiteralValue):\n def type(self):\n return BoolType()\n\n\nclass LiteralStr(LiteralValue):\n def type(self):\n return StrType()\n\n\nclass LiteralTuple(LiteralValue):\n def type(self):\n return TupleType(self.value)\n\n\nclass LiteralNone(LiteralValue):\n def type(self):\n return LiteralNone()\n\n\ndef literal_from_json(val):\n if val['type'] == 'LiteralStr':\n return LiteralStr(val['value'])\n elif val['type'] == 'LiteralNum':\n return LiteralNum(val['value'])\n elif val['type'] == 'LiteralBool':\n return LiteralBool(val['value'])\n\n\ndef _dict_extends(d1, d2):\n \"\"\"\n Helper function to create a new dictionary with the contents of the two\n given dictionaries. Does not modify either dictionary, and the values are\n copied shallowly. If there are repeates, the second dictionary wins ties.\n\n The function is written to ensure Skulpt compatibility.\n\n Args:\n d1 (dict): The first dictionary\n d2 (dict): The second dictionary\n \"\"\"\n d3 = {}\n for key, value in d1.items():\n d3[key] = value\n for key, value in d2.items():\n d3[key] = value\n return d3\n\n\nclass Type:\n \"\"\"\n Parent class for all other types, used to provide a common interface.\n\n TODO: Handle more complicated object-oriented types and custom types\n (classes).\n \"\"\"\n fields = {}\n immutable = False\n singular_name = 'a type'\n\n def clone(self):\n return self.__class__()\n\n def __str__(self):\n return str(self.__class__.__name__)\n\n def clone_mutably(self):\n if self.immutable:\n return self.clone()\n else:\n return self\n\n def index(self, i):\n return self.clone()\n\n def load_attr(self, attr, tifa, callee=None, callee_position=None):\n if attr in self.fields:\n return self.fields[attr]\n # TODO: Handle more kinds of common mistakes\n if attr == \"append\":\n tifa.report_issue('Append to non-list',\n {'name': tifa.identify_caller(callee),\n 'position': callee_position, 'type': self})\n return UnknownType()\n\n def is_empty(self):\n return True\n\n def is_equal(self, other):\n # TODO: Make this more sophisticated!\n if type(self) not in TYPE_LOOKUPS:\n return False\n return other in TYPE_LOOKUPS[type(self)]\n\n def is_instance(self, other):\n # TODO: Implement this correctly\n return self.is_equal(other)\n\n\nclass UnknownType(Type):\n \"\"\"\n A special type used to indicate an unknowable type.\n \"\"\"\n\n\nclass RecursedType(Type):\n \"\"\"\n A special type used as a placeholder for the result of a\n recursive call that we have already process. 
This type will\n be dominated by any actual types, but will not cause an issue.\n \"\"\"\n\n\nclass FunctionType(Type):\n \"\"\"\n \n Special values for `returns`:\n identity: Returns the first argument's type\n element: Returns the first argument's first element's type\n void: Returns the NoneType\n \"\"\"\n singular_name = 'a function'\n\n def __init__(self, definition=None, name=\"*Anonymous\", returns=None):\n if returns is not None and definition is None:\n if returns == 'identity':\n def definition(ti, ty, na, args, ca):\n if args:\n return args[0].clone()\n return UnknownType()\n elif returns == 'element':\n def definition(ti, ty, na, args, ca):\n if args:\n return args[0].index(0)\n return UnknownType()\n elif returns == 'void':\n def definition(ti, ty, na, args, ca):\n return NoneType()\n else:\n def definition(ti, ty, na, args, ca):\n return returns.clone()\n self.definition = definition\n self.name = name\n\n\nclass ClassType(Type):\n singular_name = 'a class'\n\n def __init__(self, name):\n self.name = name\n self.fields = {}\n self.scope_id = None\n\n def add_attr(self, name, type):\n self.fields[name] = type\n\n def get_constructor(self):\n i = InstanceType(self)\n return FunctionType(name='__init__', returns=i)\n\n def clone(self):\n return ClassType(self.name)\n\n\nclass InstanceType(Type):\n def __init__(self, parent):\n self.parent = parent\n self.fields = parent.fields\n\n def __str__(self):\n return \"InstanceTypeOf\" + str(self.parent.name)\n\n def clone(self):\n return InstanceType(self.parent)\n\n def add_attr(self, name, type):\n # TODO: What if this is a type change?\n self.parent.add_attr(name, type)\n\n\nclass NumType(Type):\n singular_name = 'a number'\n immutable = True\n\n def index(self, i):\n return UnknownType()\n\n\nclass NoneType(Type):\n singular_name = 'a None'\n immutable = True\n\n\nclass BoolType(Type):\n singular_name = 'a boolean'\n immutable = True\n\n\nclass TupleType(Type):\n \"\"\"\n \"\"\"\n singular_name = 'a tuple'\n\n def __init__(self, subtypes=None):\n if subtypes is None:\n subtypes = []\n self.subtypes = subtypes\n\n def index(self, i):\n if isinstance(i, LiteralNum):\n return self.subtypes[i.value].clone()\n else:\n return self.subtypes[i].clone()\n\n def clone(self):\n return TupleType([t.clone() for t in self.subtypes])\n\n immutable = True\n\n\nclass ListType(Type):\n singular_name = 'a list'\n\n def __init__(self, subtype=None, empty=True):\n if subtype is None:\n subtype = UnknownType()\n self.subtype = subtype\n self.empty = empty\n\n def index(self, i):\n return self.subtype.clone()\n\n def clone(self):\n return ListType(self.subtype.clone(), self.empty)\n\n def load_attr(self, attr, tifa, callee=None, callee_position=None):\n if attr == 'append':\n def _append(tifa, function_type, callee, args, position):\n if args:\n cloned_type = ListType(subtype=args[0].clone(),\n empty=False)\n if callee:\n tifa.append_variable(callee, cloned_type, position)\n self.empty = False\n self.subtype = args[0]\n\n return FunctionType(_append, 'append')\n return Type.load_attr(self, attr, tifa, callee, callee_position)\n\n def is_empty(self):\n return self.empty\n\n\nclass StrType(Type):\n singular_name = 'a string'\n\n def __init__(self, empty=False):\n self.empty = empty\n\n def index(self, i):\n return StrType()\n\n def is_empty(self):\n return self.empty\n\n fields = _dict_extends(Type.fields, {})\n immutable = True\n\n\nStrType.fields.update({\n # Methods that return strings\n \"capitalize\": FunctionType(name='capitalize', 
returns=StrType()),\n \"center\": FunctionType(name='center', returns=StrType()),\n \"expandtabs\": FunctionType(name='expandtabs', returns=StrType()),\n \"join\": FunctionType(name='join', returns=StrType()),\n \"ljust\": FunctionType(name='ljust', returns=StrType()),\n \"lower\": FunctionType(name='lower', returns=StrType()),\n \"lstrip\": FunctionType(name='lstrip', returns=StrType()),\n \"replace\": FunctionType(name='replace', returns=StrType()),\n \"rjust\": FunctionType(name='rjust', returns=StrType()),\n \"rstrip\": FunctionType(name='rstrip', returns=StrType()),\n \"strip\": FunctionType(name='strip', returns=StrType()),\n \"swapcase\": FunctionType(name='swapcase', returns=StrType()),\n \"title\": FunctionType(name='title', returns=StrType()),\n \"translate\": FunctionType(name='translate', returns=StrType()),\n \"upper\": FunctionType(name='upper', returns=StrType()),\n \"zfill\": FunctionType(name='zfill', returns=StrType()),\n # Methods that return numbers\n \"count\": FunctionType(name='count', returns=NumType()),\n \"find\": FunctionType(name='find', returns=NumType()),\n \"rfind\": FunctionType(name='rfind', returns=NumType()),\n \"index\": FunctionType(name='index', returns=NumType()),\n \"rindex\": FunctionType(name='rindex', returns=NumType()),\n # Methods that return booleans\n \"startswith\": FunctionType(name='startswith', returns=BoolType()),\n \"endswith\": FunctionType(name='endswith', returns=BoolType()),\n \"isalnum\": FunctionType(name='isalnum', returns=BoolType()),\n \"isalpha\": FunctionType(name='isalpha', returns=BoolType()),\n \"isdigit\": FunctionType(name='isdigit', returns=BoolType()),\n \"islower\": FunctionType(name='islower', returns=BoolType()),\n \"isspace\": FunctionType(name='isspace', returns=BoolType()),\n \"istitle\": FunctionType(name='istitle', returns=BoolType()),\n \"isupper\": FunctionType(name='isupper', returns=BoolType()),\n # Methods that return List of Strings\n \"rsplit\": FunctionType(name='rsplit', returns=ListType(StrType(), empty=False)),\n \"split\": FunctionType(name='split', returns=ListType(StrType(), empty=False)),\n \"splitlines\": FunctionType(name='splitlines', returns=ListType(StrType(), empty=False))\n})\n\n\nclass FileType(Type):\n singular_name = 'a file'\n\n def index(self, i):\n return StrType()\n\n fields = _dict_extends(Type.fields, {\n 'close': FunctionType(name='close', returns='void'),\n 'read': FunctionType(name='read', returns=StrType()),\n 'readlines': FunctionType(name='readlines', returns=ListType(StrType(), False))\n })\n\n def is_empty(self):\n return False\n\n\nclass DictType(Type):\n singular_name = 'a dictionary'\n\n def __init__(self, empty=False, literals=None, keys=None, values=None):\n self.empty = empty\n self.literals = literals\n self.values = values\n self.keys = keys\n\n def clone(self):\n return DictType(self.empty, self.literals, self.keys, self.values)\n\n def is_empty(self):\n return self.empty\n\n def index(self, i):\n if self.empty:\n return UnknownType()\n elif self.literals is not None:\n for literal, value in zip(self.literals, self.values):\n if are_literals_equal(literal, i):\n return value.clone()\n return UnknownType()\n else:\n return self.keys.clone()\n\n def load_attr(self, attr, tifa, callee=None, callee_position=None):\n if attr == 'items':\n def _items(tifa, function_type, callee, args, position):\n if self.literals is None:\n return ListType(TupleType([self.keys, self.values]),\n empty=False)\n else:\n return ListType(TupleType([self.literals[0].type(),\n 
self.values[0]]),\n empty=False)\n\n return FunctionType(_items, 'items')\n elif attr == 'keys':\n def _keys(tifa, function_type, callee, args, position):\n if self.literals is None:\n return ListType(self.keys, empty=False)\n else:\n return ListType(self.literals[0].type(), empty=False)\n\n return FunctionType(_keys, 'keys')\n elif attr == 'values':\n def _values(tifa, function_type, callee, args, position):\n if self.literals is None:\n return ListType(self.values, empty=False)\n else:\n return ListType(self.values[0], empty=False)\n\n return FunctionType(_values, 'values')\n return Type.load_attr(self, attr, tifa, callee, callee_position)\n\n\nclass ModuleType(Type):\n singular_name = 'a module'\n\n def __init__(self, name=\"*UnknownModule\", submodules=None, fields=None):\n self.name = name\n if submodules is None:\n submodules = {}\n self.submodules = submodules\n if fields is None:\n fields = {}\n self.fields = fields\n\n\nclass SetType(ListType):\n singular_name = 'a set'\n\n\nclass GeneratorType(ListType):\n singular_name = 'a generator'\n\n\n# Custom parking class in blockpy\n\nclass TimeType(Type):\n singular_name = 'a time of day'\n\n\nclass DayType(Type):\n singular_name = 'a day of the week'\n\n\ntry:\n from numbers import Number\nexcept Exception:\n Number = int\n\nTYPE_LOOKUPS = {\n FunctionType: ('function', FunctionType, 'FunctionType'),\n ClassType: ('class', ClassType, 'ClassType'),\n InstanceType: ('instance', InstanceType, 'InstanceType'),\n NumType: ('num', int, float, complex, NumType, Number, 'NumType'),\n BoolType: ('bool', bool, BoolType, 'BoolType'),\n NoneType: ('None', None, NoneType, 'NoneType'),\n TupleType: ('tuple', tuple, TupleType, 'TupleType'),\n ListType: ('list', list, ListType, 'ListType'),\n StrType: ('str', str, StrType, 'StrType'),\n FileType: ('file', FileType, 'FileType'),\n DictType: ('dict', dict, DictType, 'DictType'),\n SetType: ('set', set, SetType, 'SetType'),\n}\n\n\ndef type_from_json(val):\n if val['type'] == 'DictType':\n values = [type_from_json(v) for v in val['values']]\n empty = val.get('empty', None)\n if 'literals' in val:\n literals = [literal_from_json(l) for l in val['literals']]\n return DictType(empty, literals=literals, values=values)\n else:\n keys = [type_from_json(k) for k in val['keys']]\n return DictType(empty, keys=keys, values=values)\n elif val['type'] == 'ListType':\n return ListType(type_from_json(val.get('subtype', None)),\n val.get('empty', None))\n elif val['type'] == 'StrType':\n return StrType(val.get('empty', None))\n elif val['type'] == 'BoolType':\n return BoolType()\n elif val['type'] == 'NoneType':\n return NoneType()\n elif val['type'] == 'NumType':\n return NumType()\n elif val['type'] == 'ModuleType':\n submodules = {name: type_from_json(m)\n for name, m in val.get('submodules', {}).items()}\n fields = {name: type_from_json(m)\n for name, m in val.get('fields', {}).items()}\n return ModuleType(name=val.get('name'), submodules=submodules,\n fields=fields)\n elif val['type'] == 'FunctionType':\n returns = type_from_json(val.get('returns', {'type': 'NoneType'}))\n return FunctionType(name=val.get('name'), returns=returns)\n\n\ndef type_to_literal(type):\n if isinstance(type, NumType):\n return LiteralNum(0)\n elif isinstance(type, StrType):\n return LiteralStr(\"\")\n else:\n # TODO: Finish the mapping\n return LiteralStr(\"\")\n\n\nTYPE_STRINGS = {\n \"str\": StrType, \"string\": StrType,\n \"num\": NumType, \"number\": NumType, \"int\": NumType, \"integer\": NumType, \"float\": NumType,\n \"complex\": 
NumType,\n \"bool\": BoolType, \"boolean\": BoolType,\n \"none\": NoneType,\n \"dict\": DictType, \"dictionary\": DictType,\n \"list\": ListType,\n \"tuple\": TupleType,\n \"set\": SetType,\n \"file\": FileType,\n \"func\": FunctionType, \"function\": FunctionType,\n \"class\": ClassType,\n}\n\n\ndef get_tifa_type_from_str(value, custom_types):\n value = value.lower()\n if value in custom_types:\n return custom_types[value]\n if value in TYPE_STRINGS:\n return TYPE_STRINGS[value]()\n else:\n custom_types.add(value)\n return UnknownType()\n # TODO: handle custom types\n\n\ndef get_tifa_type(v, custom_types):\n if isinstance(v, ast.Str):\n return get_tifa_type_from_str(v.s, custom_types)\n elif isinstance(v, ast.Name):\n return get_tifa_type_from_str(v.id, custom_types)\n","src/lib/pedal/tifa/type_operations.py":"import ast\n\nfrom pedal.tifa.type_definitions import (UnknownType, NumType, BoolType,\n TupleType, ListType, StrType,\n DictType, SetType, GeneratorType,\n DayType, TimeType)\n\n\ndef merge_types(left, right):\n # TODO: Check that lists/sets have the same subtypes\n if isinstance(left, (ListType, SetType, GeneratorType)):\n if left.empty:\n return right.subtype\n else:\n return left.subtype.clone()\n elif isinstance(left, TupleType):\n return left.subtypes + right.subtypes\n\n\ndef NumType_any(*x):\n return NumType()\n\n\ndef StrType_any(*x):\n return StrType()\n\n\ndef BoolType_any(*x):\n return BoolType()\n\n\ndef keep_left(left, right):\n return left\n\n\ndef keep_right(left, right):\n return right\n\n\nVALID_BINOP_TYPES = {\n ast.Add: {NumType: {NumType: NumType_any},\n StrType: {StrType: StrType_any},\n ListType: {ListType: merge_types},\n TupleType: {TupleType: merge_types}},\n ast.Sub: {NumType: {NumType: NumType_any},\n SetType: {SetType: merge_types}},\n ast.Div: {NumType: {NumType: NumType_any}},\n ast.FloorDiv: {NumType: {NumType: NumType_any}},\n ast.Mult: {NumType: {NumType: NumType_any,\n StrType: StrType_any,\n ListType: keep_right,\n TupleType: keep_right},\n StrType: {NumType: StrType_any},\n ListType: {NumType: keep_left},\n TupleType: {NumType: keep_left}},\n ast.Pow: {NumType: {NumType: NumType_any}},\n # TODO: Should we allow old-fashioned string interpolation?\n # Currently, I vote no because it makes the code harder and is bad form.\n ast.Mod: {NumType: {NumType: NumType_any}},\n ast.LShift: {NumType: {NumType: NumType_any}},\n ast.RShift: {NumType: {NumType: NumType_any}},\n ast.BitOr: {NumType: {NumType: NumType_any},\n BoolType: {NumType: NumType_any,\n BoolType: BoolType_any},\n SetType: {SetType: merge_types}},\n ast.BitXor: {NumType: {NumType: NumType_any},\n BoolType: {NumType: NumType_any,\n BoolType: BoolType_any},\n SetType: {SetType: merge_types}},\n ast.BitAnd: {NumType: {NumType: NumType_any},\n BoolType: {NumType: NumType_any,\n BoolType: BoolType_any},\n SetType: {SetType: merge_types}}\n}\nVALID_UNARYOP_TYPES = {\n ast.UAdd: {NumType: NumType},\n ast.USub: {NumType: NumType},\n ast.Invert: {NumType: NumType}\n}\n\n\ndef are_types_equal(left, right):\n \"\"\"\n Determine if two types are equal.\n\n This could be more Polymorphic - move the code for each type into\n its respective class instead.\n \"\"\"\n if left is None or right is None:\n return False\n elif isinstance(left, UnknownType) or isinstance(right, UnknownType):\n return False\n elif not isinstance(left, type(right)):\n return False\n elif isinstance(left, (GeneratorType, ListType)):\n if left.empty or right.empty:\n return True\n else:\n return are_types_equal(left.subtype, 
right.subtype)\n elif isinstance(left, TupleType):\n if left.empty or right.empty:\n return True\n elif len(left.subtypes) != len(right.subtypes):\n return False\n else:\n for l, r in zip(left.subtypes, right.subtypes):\n if not are_types_equal(l, r):\n return False\n return True\n elif isinstance(left, DictType):\n if left.empty or right.empty:\n return True\n elif left.literals is not None and right.literals is not None:\n if len(left.literals) != len(right.literals):\n return False\n else:\n for l, r in zip(left.literals, right.literals):\n if not are_types_equal(l, r):\n return False\n for l, r in zip(left.values, right.values):\n if not are_types_equal(l, r):\n return False\n return True\n elif left.literals is not None or right.literals is not None:\n return False\n else:\n keys_equal = are_types_equal(left.keys, right.keys)\n values_equal = are_types_equal(left.values, right.values)\n return keys_equal and values_equal\n else:\n return True\n\n\nORDERABLE_TYPES = (NumType, BoolType, StrType, ListType, DayType, TimeType,\n SetType, TupleType)\nINDEXABLE_TYPES = (StrType, ListType, SetType, TupleType, DictType)\n","src/lib/pedal/tifa/_temp_tifa.py":"import ast\nfrom pprint import pprint\n\nfrom pedal.report import MAIN_REPORT\n\nfrom pedal.tifa.type_definitions import (UnknownType, RecursedType,\n FunctionType, ClassType, InstanceType,\n NumType, NoneType, BoolType, TupleType,\n ListType, StrType, GeneratorType,\n DictType, ModuleType, SetType,\n # FileType, DayType, TimeType,\n type_from_json, type_to_literal,\n LiteralNum, LiteralBool,\n LiteralNone, LiteralStr,\n LiteralTuple)\nfrom pedal.tifa.builtin_definitions import (get_builtin_module, get_builtin_function)\nfrom pedal.tifa.type_operations import (merge_types, are_types_equal,\n VALID_UNARYOP_TYPES, VALID_BINOP_TYPES,\n ORDERABLE_TYPES, INDEXABLE_TYPES)\nfrom pedal.tifa.identifier import Identifier\nfrom pedal.tifa.state import State\nfrom pedal.tifa.messages import _format_message\n\n__all__ = ['Tifa']\n\nclass NewPath:\n \"\"\"\n Context manager for entering and leaving execution paths (e.g., if\n statements).)\n\n Args:\n tifa (Tifa): The tifa instance, so we can modify some of its\n properties that track variables and paths.\n origin_path (int): The path ID parent to this one.\n name (str): The symbolic name of this path, typically 'i' for an IF\n body and 'e' for ELSE body.\n\n Fields:\n id (int): The path ID of this path\n \"\"\"\n\n def __init__(self, tifa, origin_path, name):\n self.tifa = tifa\n self.name = name\n self.origin_path = origin_path\n self.id = None\n\n def __enter__(self):\n self.tifa.path_id += 1\n self.id = self.tifa.path_id\n self.tifa.path_names.append(str(self.id) + self.name)\n self.tifa.path_chain.insert(0, self.id)\n self.tifa.name_map[self.id] = {}\n self.tifa.path_parents[self.id] = self.origin_path\n\n def __exit__(self, type, value, traceback):\n self.tifa.path_names.pop()\n self.tifa.path_chain.pop(0)\n\nclass NewScope:\n \"\"\"\n Context manager for entering and leaving scopes (e.g., inside of\n function calls).\n\n Args:\n tifa (Tifa): The tifa instance, so we can modify some of its\n properties that track variables and paths.\n definitions_scope_chain (list of int): The scope chain of the\n definition\n \"\"\"\n\n def __init__(self, tifa, definitions_scope_chain, class_type=None):\n self.tifa = tifa\n self.definitions_scope_chain = definitions_scope_chain\n self.class_type = class_type\n\n def __enter__(self):\n # Manage scope\n self.old_scope = self.tifa.scope_chain[:]\n # Move to the 
definition's scope chain\n self.tifa.scope_chain = self.definitions_scope_chain[:]\n # And then enter its body's new scope\n self.tifa.scope_id += 1\n self.tifa.scope_chain.insert(0, self.tifa.scope_id)\n # Register as class potentially\n if self.class_type is not None:\n self.class_type.scope_id = self.tifa.scope_id\n self.tifa.class_scopes[self.tifa.scope_id] = self.class_type\n\n def __exit__(self, type, value, traceback):\n # Finish up the scope\n self.tifa._finish_scope()\n # Leave the body\n self.tifa.scope_chain.pop(0)\n # Restore the scope\n self.tifa.scope_chain = self.old_scope\n\ndef in_scope(full_name, scope_chain):\n \"\"\"\n Determine if the fully qualified variable name is in the given scope\n chain.\n\n Args:\n full_name (str): A fully qualified variable name\n scope_chain (list): A representation of a scope chain.\n Returns:\n bool: Whether the variable lives in this scope\n \"\"\"\n # Get this entity's full scope chain\n name_scopes = full_name.split(\"/\")[:-1]\n # against the reverse scope chain\n checking_scopes = [str(s) for s in reversed(scope_chain)]\n return name_scopes == checking_scopes\n\ndef match_rso(left, right):\n if left == right:\n return left\n else:\n return \"maybe\"\n\n\nclass Tifa(ast.NodeVisitor):\n \"\"\"\n TIFA Class for traversing an AST and finding common issues.\n\n Args:\n python_3 (bool): Whether to parse the code in regular PYTHON_3 mode or\n the modified AST that Skulpt uses.\n report (Report): The report object to store data and feedback in. If\n left None, defaults to the global MAIN_REPORT.\n \"\"\"\n\n def __init__(self, python_3=True, report=None):\n if report is None:\n report = MAIN_REPORT\n self.report = report\n self._initialize_report()\n self.PYTHON_3 = python_3\n\n def _initialize_report(self):\n \"\"\"\n Initialize a successful report with possible set of issues.\n \"\"\"\n self.report['tifa'] = {\n 'success': True,\n 'variables': {},\n 'top_level_variables': {},\n 'issues': {}\n }\n\n def report_issue(self, issue, data=None):\n \"\"\"\n Report the given issue with associated metadata, including the position\n if not explicitly included.\n \"\"\"\n if data is None:\n data = {}\n if 'position' not in data:\n data['position'] = self.locate()\n data['message'] = _format_message(issue, data)\n if issue not in self.report['tifa']['issues']:\n self.report['tifa']['issues'][issue] = []\n self.report['tifa']['issues'][issue].append(data)\n if data['message']:\n self.report.attach(issue, category='Analyzer', tool='TIFA',\n mistake=data)\n\n def locate(self, node=None):\n \"\"\"\n Return a dictionary representing the current location within the\n AST.\n\n Returns:\n Position dict: A dictionary with the fields 'column' and 'line',\n indicating the current position in the source code.\n \"\"\"\n if node is None:\n if self.node_chain:\n node = self.node_chain[-1]\n else:\n node = self.final_node\n return {'column': node.col_offset, 'line': node.lineno}\n\n def process_code(self, code, filename=\"__main__\"):\n \"\"\"\n Processes the AST of the given source code to generate a report.\n\n Args:\n code (str): The Python source code\n filename (str): The filename of the source code (defaults to __main__)\n Returns:\n Report: The successful or successful report object\n \"\"\"\n # Code\n self.source = code.split(\"\\n\") if code else []\n filename = filename\n\n # Attempt parsing - might fail!\n try:\n ast_tree = ast.parse(code, filename)\n except Exception as error:\n self.report['tifa']['success'] = False\n self.report['tifa']['error'] = 
error\n self.report.attach('tifa_error', category='Analyzer', tool='TIFA',\n mistake={\n 'message': \"Could not parse code\",\n 'error': error\n })\n return self.report['tifa']\n try:\n return self.process_ast(ast_tree)\n except Exception as error:\n self.report['tifa']['success'] = False\n self.report['tifa']['error'] = error\n self.report.attach('tifa_error', category='Analyzer', tool='TIFA',\n mistake={\n 'message': \"Could not process code\",\n 'error': error\n })\n return self.report['tifa']\n\n def process_ast(self, ast_tree):\n \"\"\"\n Given an AST, actually performs the type and flow analyses to return a \n report.\n\n Args:\n ast (Ast): The AST object\n Returns:\n Report: The final report object created (also available as a field).\n \"\"\"\n self._reset()\n # Traverse every node\n self.visit(ast_tree)\n\n # Check afterwards\n self.report['tifa']['variables'] = self.name_map\n self._finish_scope()\n\n # Collect top level variables\n self._collect_top_level_variables()\n # print(self.report['variables'])\n\n return self.report['tifa']\n\n def _collect_top_level_variables(self):\n \"\"\"\n Walk through the variables and add any at the top level to the\n top_level_variables field of the report.\n \"\"\"\n top_level_variables = self.report['tifa']['top_level_variables']\n main_path_vars = self.name_map[self.path_chain[0]]\n for full_name in main_path_vars:\n split_name = full_name.split(\"/\")\n if len(split_name) == 2 and split_name[0] == str(self.scope_chain[0]):\n name = split_name[1]\n top_level_variables[name] = main_path_vars[full_name]\n\n def _reset(self):\n \"\"\"\n Reinitialize fields for maintaining the system\n \"\"\"\n # Unique Global IDs\n self.path_id = 0\n self.scope_id = 0\n self.ast_id = 0\n\n # Human readable names\n self.path_names = ['*Module']\n self.scope_names = ['*Module']\n self.node_chain = []\n\n # Complete record of all Names\n self.scope_chain = [self.scope_id]\n self.path_chain = [self.path_id]\n self.name_map = {}\n self.name_map[self.path_id] = {}\n self.definition_chain = []\n self.path_parents = {}\n self.final_node = None\n self.class_scopes = {}\n\n def find_variable_scope(self, name):\n \"\"\"\n Walk through this scope and all enclosing scopes, finding the relevant\n identifier given by `name`.\n\n Args:\n name (str): The name of the variable\n Returns:\n Identifier: An Identifier for the variable, which could potentially\n not exist.\n \"\"\"\n for scope_level, scope in enumerate(self.scope_chain):\n for path_id in self.path_chain:\n path = self.name_map[path_id]\n full_name = \"/\".join(map(str, self.scope_chain[scope_level:])) + \"/\" + name\n if full_name in path:\n is_root_scope = (scope_level == 0)\n return Identifier(True, is_root_scope,\n full_name, path[full_name])\n\n return Identifier(False)\n\n def find_variable_out_of_scope(self, name):\n \"\"\"\n Walk through every scope and determine if this variable can be found\n elsewhere (which would be an issue).\n\n Args:\n name (str): The name of the variable\n Returns:\n Identifier: An Identifier for the variable, which could potentially\n not exist.\n \"\"\"\n for path in self.name_map.values():\n for full_name in path:\n unscoped_name = full_name.split(\"/\")[-1]\n if name == unscoped_name:\n return Identifier(True, False, unscoped_name, path[full_name])\n return Identifier(False)\n\n def find_path_parent(self, path_id, name):\n if name in self.name_map[path_id]:\n return Identifier(True, state=self.name_map[path_id][name])\n else:\n path_parent = self.path_parents.get(path_id)\n if 
path_parent is None:\n return Identifier(False)\n else:\n return self.find_path_parent(path_parent, name)\n\n def _finish_scope(self):\n \"\"\"\n Walk through all the variables present in this scope and ensure that\n they have been read and not overwritten.\n \"\"\"\n path_id = self.path_chain[0]\n for name in self.name_map[path_id]:\n if in_scope(name, self.scope_chain):\n state = self.name_map[path_id][name]\n if state.over == 'yes':\n position = state.over_position\n self.report_issue('Overwritten Variable',\n {'name': state.name, 'position': position})\n if state.read == 'no':\n self.report_issue('Unused Variable',\n {'name': state.name, 'type': state.type,\n 'position': state.position})\n\n def visit(self, node):\n \"\"\"\n Process this node by calling its appropriate visit_*\n\n Args:\n node (AST): The node to visit\n Returns:\n Type: The type calculated during the visit.\n \"\"\"\n # Start processing the node\n self.node_chain.append(node)\n self.ast_id += 1\n\n # Actions after return?\n if len(self.scope_chain) > 1:\n return_state = self.find_variable_scope(\"*return\")\n if return_state.exists and return_state.in_scope:\n if return_state.state.set == \"yes\":\n self.report_issue(\"Action after return\")\n\n # No? All good, let's enter the node\n self.final_node = node\n result = ast.NodeVisitor.visit(self, node)\n\n # Pop the node out of the chain\n self.ast_id -= 1\n self.node_chain.pop()\n\n # If a node failed to return something, return the UNKNOWN TYPE\n if result is None:\n return UnknownType()\n else:\n return result\n\n def _visit_nodes(self, nodes):\n \"\"\"\n Visit all the nodes in the given list.\n\n Args:\n nodes (list): A list of values, of which any AST nodes will be\n visited.\n \"\"\"\n for node in nodes:\n if isinstance(node, ast.AST):\n self.visit(node)\n\n def walk_targets(self, targets, type, walker):\n \"\"\"\n Iterate through the targets and call the given function on each one.\n\n Args:\n targets (list of Ast nodes): A list of potential targets to be\n traversed.\n type (Type): The given type to be unraveled and applied to the\n targets.\n walker (Ast Node, Type -> None): A function that will process\n each target and unravel the type.\n \"\"\"\n for target in targets:\n walker(target, type)\n\n def _walk_target(self, target, type):\n \"\"\"\n Recursively apply the type to the target\n\n Args:\n target (Ast): The current AST node to process\n type (Type): The type to apply to this node\n \"\"\"\n if isinstance(target, ast.Name):\n self.store_iter_variable(target.id, type, self.locate(target))\n return target.id\n elif isinstance(target, (ast.Tuple, ast.List)):\n result = None\n for i, elt in enumerate(target.elts):\n elt_type = type.index(LiteralNum(i))\n potential_name = self._walk_target(elt, elt_type)\n if potential_name is not None and result is None:\n result = potential_name\n return result\n\n def visit_Assign(self, node):\n \"\"\"\n Simple assignment statement:\n __targets__ = __value__\n\n Args:\n node (AST): An Assign node\n Returns:\n None\n \"\"\"\n # Handle value\n value_type = self.visit(node.value)\n # Handle targets\n self._visit_nodes(node.targets)\n\n # TODO: Properly handle assignments with subscripts\n def action(target, type):\n if isinstance(target, ast.Name):\n self.store_variable(target.id, type)\n elif isinstance(target, (ast.Tuple, ast.List)):\n for i, elt in enumerate(target.elts):\n eltType = type.index(LiteralNum(i))\n action(elt, eltType)\n elif isinstance(target, ast.Subscript):\n pass\n elif isinstance(target, ast.Attribute):\n 
left_hand_type = self.visit(target.value)\n if isinstance(left_hand_type, InstanceType):\n left_hand_type.add_attr(target.attr, type)\n # TODO: Otherwise we attempted to assign to a non-instance\n # TODO: Handle minor type changes (e.g., appending to an inner list)\n\n self.walk_targets(node.targets, value_type, action)\n\n def visit_AugAssign(self, node):\n # Handle value\n right = self.visit(node.value)\n # Handle target\n left = self.visit(node.target)\n # Target is always a Name, Subscript, or Attribute\n name = self.identify_caller(node.target)\n\n # Handle operation\n self.load_variable(name)\n if isinstance(left, UnknownType) or isinstance(right, UnknownType):\n return UnknownType()\n elif type(node.op) in VALID_BINOP_TYPES:\n op_lookup = VALID_BINOP_TYPES[type(node.op)]\n if type(left) in op_lookup:\n op_lookup = op_lookup[type(left)]\n if type(right) in op_lookup:\n op_lookup = op_lookup[type(right)]\n result_type = op_lookup(left, right)\n self.store_variable(name, result_type)\n return result_type\n\n self.report_issue(\"Incompatible types\",\n {\"left\": left, \"right\": right,\n \"operation\": node.op})\n\n def visit_Attribute(self, node):\n # Handle value\n value_type = self.visit(node.value)\n # Handle ctx\n # TODO: Handling contexts\n # Handle attr\n return value_type.load_attr(node.attr, self, node.value, self.locate())\n\n def visit_BinOp(self, node):\n # Handle left and right\n left = self.visit(node.left)\n right = self.visit(node.right)\n\n # Handle operation\n if isinstance(left, UnknownType) or isinstance(right, UnknownType):\n return UnknownType()\n elif type(node.op) in VALID_BINOP_TYPES:\n op_lookup = VALID_BINOP_TYPES[type(node.op)]\n if type(left) in op_lookup:\n op_lookup = op_lookup[type(left)]\n if type(right) in op_lookup:\n op_lookup = op_lookup[type(right)]\n return op_lookup(left, right)\n\n self.report_issue(\"Incompatible types\",\n {\"left\": left, \"right\": right,\n \"operation\": node.op})\n return UnknownType()\n\n def visit_Bool(self, node):\n return BoolType()\n\n def visit_BoolOp(self, node):\n # Handle left and right\n values = []\n for value in node.values:\n values.append(self.visit(value))\n\n # TODO: Truthiness is not supported! 
Probably need a Union type\n # TODO: Literals used as truthy value\n\n # Handle operation\n return BoolType()\n\n def visit_Call(self, node):\n # Handle func part (Name or Attribute)\n function_type = self.visit(node.func)\n # TODO: Need to grab the actual type in some situations\n callee = self.identify_caller(node)\n\n # Handle args\n arguments = [self.visit(arg) for arg in node.args]\n\n # TODO: Handle keywords\n # TODO: Handle starargs\n # TODO: Handle kwargs\n if isinstance(function_type, FunctionType):\n # Test if we have called this definition before\n if function_type.definition not in self.definition_chain:\n self.definition_chain.append(function_type.definition)\n # Function invocation\n result = function_type.definition(self, function_type, callee,\n arguments, self.locate())\n self.definition_chain.pop()\n return result\n else:\n self.report_issue(\"Recursive Call\", {\"name\": callee})\n elif isinstance(function_type, ClassType):\n constructor = function_type.get_constructor().definition\n self.definition_chain.append(constructor)\n result = constructor(self, constructor, callee, arguments, self.locate())\n self.definition_chain.pop()\n if '__init__' in function_type.fields:\n initializer = function_type.fields['__init__']\n if isinstance(initializer, FunctionType):\n self.definition_chain.append(initializer)\n initializer.definition(self, initializer, result, [result] + arguments, self.locate())\n self.definition_chain.pop()\n return result\n else:\n self.report_issue(\"Not a function\", {\"name\": callee})\n return UnknownType()\n\n def visit_ClassDef(self, node):\n class_name = node.name\n new_class_type = ClassType(class_name)\n self.store_variable(class_name, new_class_type)\n # TODO: Define a new scope definition that executes the body\n # TODO: find __init__, execute that\n definitions_scope = self.scope_chain[:]\n class_scope = NewScope(self, definitions_scope, class_type=new_class_type)\n with class_scope:\n self.generic_visit(node)\n\n def visit_Compare(self, node):\n # Handle left and right\n left = self.visit(node.left)\n comparators = [self.visit(compare) for compare in node.comparators]\n\n # Handle ops\n for op, right in zip(node.ops, comparators):\n if isinstance(op, (ast.Eq, ast.NotEq, ast.Is, ast.IsNot)):\n continue\n elif isinstance(op, (ast.Lt, ast.LtE, ast.GtE, ast.Gt)):\n if are_types_equal(left, right):\n if isinstance(left, ORDERABLE_TYPES):\n continue\n elif isinstance(op, (ast.In, ast.NotIn)):\n if isinstance(right, INDEXABLE_TYPES):\n continue\n self.report_issue(\"Incompatible types\",\n {\"left\": left, \"right\": right,\n \"operation\": op})\n return BoolType()\n\n def _visit_collection_loop(self, node):\n # Handle the iteration list\n iter = node.iter\n iter_list_name = None\n if isinstance(iter, ast.Name):\n iter_list_name = iter.id\n if iter_list_name == \"___\":\n self.report_issue(\"Unconnected blocks\",\n {\"position\": self.locate(iter)})\n state = self.iterate_variable(iter_list_name, self.locate(iter))\n iter_type = state.type\n else:\n iter_type = self.visit(iter)\n\n if iter_type.is_empty():\n self.report_issue(\"Iterating over empty list\",\n {\"name\": iter_list_name,\n \"position\": self.locate(iter)})\n\n if not isinstance(iter_type, INDEXABLE_TYPES):\n self.report_issue(\"Iterating over non-list\",\n {\"name\": iter_list_name,\n \"position\": self.locate(iter)})\n\n iter_subtype = iter_type.index(LiteralNum(0))\n\n # Handle the iteration variable\n iter_variable_name = self._walk_target(node.target, iter_subtype)\n\n if 
iter_variable_name and iter_list_name:\n if iter_variable_name == iter_list_name:\n self.report_issue(\"Iteration Problem\",\n {\"name\": iter_variable_name,\n \"position\": self.locate(node.target)})\n\n def visit_comprehension(self, node):\n self._visit_collection_loop(node)\n # Handle the bodies\n self.visit_statements(node.ifs)\n\n def visit_Dict(self, node):\n \"\"\"\n Three types of dictionaries\n - empty\n - uniform type\n - record\n \"\"\"\n type = DictType()\n if not node.keys:\n type.empty = True\n else:\n type.empty = False\n all_literals = True\n keys, values, literals = [], [], []\n for key, value in zip(node.keys, node.values):\n literal = self.get_literal(key)\n key, value = self.visit(key), self.visit(value)\n values.append(value)\n keys.append(key)\n if literal is not None:\n literals.append(literal)\n else:\n all_literals = False\n\n if all_literals:\n type.literals = literals\n type.values = values\n else:\n type.keys = key\n type.values = value\n return type\n\n def visit_DictComp(self, node):\n # TODO: Handle comprehension scope\n for generator in node.generators:\n self.visit(generator)\n keys = self.visit(node.key)\n values = self.visit(node.value)\n return DictType(keys=keys, values=values)\n\n def visit_For(self, node):\n self._visit_collection_loop(node)\n # Handle the bodies\n self.visit_statements(node.body)\n self.visit_statements(node.orelse)\n\n def visit_FunctionDef(self, node):\n # Name\n function_name = node.name\n position = self.locate()\n definitions_scope = self.scope_chain[:]\n\n def definition(tifa, call_type, call_name, parameters, call_position):\n function_scope = NewScope(self, definitions_scope)\n with function_scope:\n # Process arguments\n args = node.args.args\n if len(args) != len(parameters):\n self.report_issue('Incorrect Arity', {\"position\": position})\n # TODO: Handle special types of parameters\n for arg, parameter in zip(args, parameters):\n name = arg.arg if self.PYTHON_3 else arg.id\n if parameter is not None:\n parameter = parameter.clone_mutably()\n self.store_variable(name, parameter, position)\n if len(args) < len(parameters):\n for undefined_parameter in parameters[len(args):]:\n self.store_variable(name, UnknownType(), position)\n self.visit_statements(node.body)\n return_state = self.find_variable_scope(\"*return\")\n return_value = NoneType()\n # If the pseudo variable exists, we load it and get its type\n if return_state.exists and return_state.in_scope:\n return_state = self.load_variable(\"*return\", call_position)\n return_value = return_state.type\n return return_value\n\n function = FunctionType(definition=definition, name=function_name)\n self.store_variable(function_name, function)\n return function\n\n def visit_GeneratorExp(self, node):\n # TODO: Handle comprehension scope\n for generator in node.generators:\n self.visit(generator)\n return GeneratorType(self.visit(node.elt))\n\n def visit_If(self, node):\n # Visit the conditional\n self.visit(node.test)\n\n if len(node.orelse) == 1 and isinstance(node.orelse[0], ast.Pass):\n self.report_issue(\"Malformed Conditional\")\n elif len(node.body) == 1 and isinstance(node.body[0], ast.Pass):\n if node.orelse:\n self.report_issue(\"Malformed Conditional\")\n\n # Visit the bodies\n this_path_id = self.path_chain[0]\n if_path = NewPath(self, this_path_id, \"i\")\n with if_path:\n for statement in node.body:\n self.visit(statement)\n else_path = NewPath(self, this_path_id, \"e\")\n with else_path:\n for statement in node.orelse:\n self.visit(statement)\n\n # Combine two 
paths into one\n # Check for any names that are on the IF path\n self.merge_paths(this_path_id, if_path.id, else_path.id)\n\n def visit_IfExp(self, node):\n # Visit the conditional\n self.visit(node.test)\n\n # Visit the body\n body = self.visit(node.body)\n\n # Visit the orelse\n orelse = self.visit(node.orelse)\n\n if are_types_equal(body, orelse):\n return body\n\n # TODO: Union type?\n return UnknownType()\n\n def visit_Import(self, node):\n # Handle names\n for alias in node.names:\n asname = alias.asname or alias.name\n module_type = self.load_module(alias.name)\n self.store_variable(asname, module_type)\n\n def visit_ImportFrom(self, node):\n # Handle names\n for alias in node.names:\n if node.module is None:\n asname = alias.asname or alias.name\n module_type = self.load_module(alias.name)\n else:\n module_name = node.module\n asname = alias.asname or alias.name\n module_type = self.load_module(module_name)\n name_type = module_type.load_attr(alias.name, self,\n callee_position=self.locate())\n self.store_variable(asname, name_type)\n\n def visit_Lambda(self, node):\n # Name\n position = self.locate()\n definitions_scope = self.scope_chain[:]\n\n def definition(tifa, call_type, call_name, parameters, call_position):\n function_scope = NewScope(self, definitions_scope)\n with function_scope:\n # Process arguments\n args = node.args.args\n if len(args) != len(parameters):\n self.report_issue('Incorrect Arity', {\"position\": position})\n # TODO: Handle special types of parameters\n for arg, parameter in zip(args, parameters):\n name = arg.arg if self.PYTHON_3 else arg.id\n if parameter is not None:\n parameter = parameter.clone_mutably()\n self.store_variable(name, parameter, position)\n if len(args) < len(parameters):\n for undefined_parameter in parameters[len(args):]:\n self.store_variable(name, UnknownType(), position)\n return_value = self.visit(node.body)\n return return_value\n\n return FunctionType(definition=definition)\n\n def visit_List(self, node):\n type = ListType()\n if node.elts:\n type.empty = False\n # TODO: confirm homogenous subtype\n for elt in node.elts:\n type.subtype = self.visit(elt)\n else:\n type.empty = True\n return type\n\n def visit_ListComp(self, node):\n # TODO: Handle comprehension scope\n for generator in node.generators:\n self.visit(generator)\n return ListType(self.visit(node.elt))\n\n def visit_Name(self, node):\n name = node.id\n if name == \"___\":\n self.report_issue(\"Unconnected blocks\")\n if isinstance(node.ctx, ast.Load):\n if name == \"True\" or name == \"False\":\n return BoolType()\n elif name == \"None\":\n return NoneType()\n else:\n variable = self.find_variable_scope(name)\n builtin = get_builtin_function(name)\n if not variable.exists and builtin:\n return builtin\n else:\n state = self.load_variable(name)\n return state.type\n else:\n variable = self.find_variable_scope(name)\n if variable.exists:\n return variable.state.type\n else:\n return UnknownType()\n\n def visit_Num(self, node):\n return NumType()\n\n def visit_Return(self, node):\n if len(self.scope_chain) == 1:\n self.report_issue(\"Return outside function\")\n if node.value is not None:\n self.return_variable(self.visit(node.value))\n else:\n self.return_variable(NoneType())\n\n def visit_SetComp(self, node):\n # TODO: Handle comprehension scope\n for generator in node.generators:\n self.visit(generator)\n return SetType(self.visit(node.elt))\n\n def visit_statements(self, nodes):\n # TODO: Check for pass in the middle of a series of statement\n if 
any(isinstance(node, ast.Pass) for node in nodes):\n pass\n return [self.visit(statement) for statement in nodes]\n\n def visit_Str(self, node):\n if node.s == \"\":\n return StrType(True)\n else:\n return StrType(False)\n\n def visit_Subscript(self, node):\n # Handle value\n value_type = self.visit(node.value)\n # Handle slice\n if isinstance(node.slice, ast.Index):\n literal = self.get_literal(node.slice.value)\n if literal is None:\n dynamic_literal = type_to_literal(self.visit(node.slice.value))\n return value_type.index(dynamic_literal)\n else:\n return value_type.index(literal)\n elif isinstance(node.slice, ast.Slice):\n if node.slice.lower is not None:\n self.visit(node.slice.lower)\n if node.slice.upper is not None:\n self.visit(node.slice.upper)\n if node.slice.step is not None:\n self.visit(node.slice.step)\n return value_type\n\n def visit_Tuple(self, node):\n type = TupleType()\n if not node.elts:\n type.empty = True\n type.subtypes = []\n else:\n type.empty = False\n # TODO: confirm homogenous subtype\n type.subtypes = [self.visit(elt) for elt in node.elts]\n return type\n\n def visit_UnaryOp(self, node):\n # Handle operand\n operand = self.visit(node.operand)\n\n if isinstance(node.op, ast.Not):\n return BoolType()\n elif isinstance(operand, UnknownType):\n return UnknownType()\n elif type(node.op) in VALID_UNARYOP_TYPES:\n op_lookup = VALID_UNARYOP_TYPES[type(node.op)]\n if type(node.op) in op_lookup:\n op_lookup = op_lookup[type(node.op)]\n if type(operand) in op_lookup:\n op_lookup = op_lookup[type(operand)]\n return op_lookup(operand)\n return UnknownType()\n\n def visit_While(self, node):\n # Visit conditional\n self.visit(node.test)\n\n # Visit the bodies\n this_path_id = self.path_id\n # One path is that we never enter the body\n empty_path = NewPath(self, this_path_id, \"e\")\n with empty_path:\n pass\n # Another path is that we loop through the body and check the test again\n body_path = NewPath(self, this_path_id, \"w\")\n with body_path:\n for statement in node.body:\n self.visit(statement)\n # Revisit conditional\n self.visit(node.test)\n # If there's else bodies (WEIRD) then we should check them afterwards\n if node.orelse:\n self.report_issue(\"Else on loop body\")\n for statement in node.orelse:\n self.visit(statement)\n\n # Combine two paths into one\n # Check for any names that are on the IF path\n self.merge_paths(this_path_id, body_path.id, empty_path.id)\n\n def visit_With(self, node):\n if self.PYTHON_3:\n for item in node.items:\n type_value = self.visit(item.context_expr)\n self.visit(item.optional_vars)\n self._walk_target(item.optional_vars, type_value)\n else:\n type_value = self.visit(node.context_expr)\n # self.visit(node.optional_vars)\n self._walk_target(node.optional_vars, type_value)\n # Handle the bodies\n self.visit_statements(node.body)\n\n def _scope_chain_str(self, name=None):\n \"\"\"\n Convert the current scope chain to a string representation (divided \n by \"/\").\n\n Returns:\n str: String representation of the scope chain.\n \"\"\"\n if name:\n return \"/\".join(map(str, self.scope_chain)) + \"/\" + name\n else:\n return \"/\".join(map(str, self.scope_chain))\n\n def identify_caller(self, node):\n \"\"\"\n Figures out the variable that was used to kick off this call,\n which is almost always the relevant Name to track as being updated.\n If the origin wasn't a Name, nothing will need to be updated so None\n is returned instead.\n\n TODO: Is this sufficient?\n\n Args:\n node (AST): An AST node\n Returns:\n str or None: The name of the 
variable or None if no origin could\n be found.\n \"\"\"\n if isinstance(node, ast.Name):\n return node.id\n elif isinstance(node, ast.Call):\n return self.identify_caller(node.func)\n elif isinstance(node, (ast.Attribute, ast.Subscript)):\n return self.identify_caller(node.value)\n return None\n\n def iterate_variable(self, name, position=None):\n \"\"\"\n Update the variable by iterating through it - this doesn't do anything\n fancy yet.\n \"\"\"\n return self.load_variable(name, position)\n\n def store_iter_variable(self, name, type, position=None):\n state = self.store_variable(name, type, position)\n state.read = 'yes'\n return state\n\n def return_variable(self, type):\n return self.store_variable(\"*return\", type)\n\n def append_variable(self, name, type, position=None):\n return self.store_variable(name, type, position)\n\n def store_variable(self, name, type, position=None):\n \"\"\"\n Update the variable with the given name to now have the new type.\n\n Args:\n name (str): The unqualified name of the variable. The variable will\n be assumed to be in the current scope.\n type (Type): The new type of this variable.\n Returns:\n State: The new state of the variable.\n \"\"\"\n if position is None:\n position = self.locate()\n full_name = self._scope_chain_str(name)\n current_path = self.path_chain[0]\n variable = self.find_variable_scope(name)\n if not variable.exists:\n # Create a new instance of the variable on the current path\n new_state = State(name, [], type, 'store', position,\n read='no', set='yes', over='no')\n self.name_map[current_path][full_name] = new_state\n else:\n new_state = self.trace_state(variable.state, \"store\", position)\n if not variable.in_scope:\n self.report_issue(\"Write out of scope\", {'name': name})\n # Type change?\n if not are_types_equal(type, variable.state.type):\n self.report_issue(\"Type changes\",\n {'name': name, 'old': variable.state.type,\n 'new': type, 'position': position})\n new_state.type = type\n # Overwritten?\n if variable.state.set == 'yes' and variable.state.read == 'no':\n new_state.over_position = position\n new_state.over = 'yes'\n else:\n new_state.set = 'yes'\n new_state.read = 'no'\n self.name_map[current_path][full_name] = new_state\n # If this is a class scope...\n current_scope = self.scope_chain[0]\n if current_scope in self.class_scopes:\n self.class_scopes[current_scope].add_attr(name, new_state.type)\n return new_state\n\n def load_variable(self, name, position=None):\n \"\"\"\n Retrieve the variable with the given name.\n\n Args:\n name (str): The unqualified name of the variable. 
If the variable is\n not found in the current scope or an enclosing sope, all\n other scopes will be searched to see if it was read out\n of scope.\n Returns:\n State: The current state of the variable.\n \"\"\"\n full_name = self._scope_chain_str(name)\n current_path = self.path_chain[0]\n variable = self.find_variable_scope(name)\n if position is None:\n position = self.locate()\n if not variable.exists:\n out_of_scope_var = self.find_variable_out_of_scope(name)\n # Create a new instance of the variable on the current path\n if out_of_scope_var.exists:\n self.report_issue(\"Read out of scope\", {'name': name})\n else:\n self.report_issue(\"Initialization Problem\", {'name': name})\n new_state = State(name, [], UnknownType(), 'load', position,\n read='yes', set='no', over='no')\n self.name_map[current_path][full_name] = new_state\n else:\n new_state = self.trace_state(variable.state, \"load\", position)\n if variable.state.set == 'no':\n self.report_issue(\"Initialization Problem\", {'name': name})\n if variable.state.set == 'maybe':\n self.report_issue(\"Possible Initialization Problem\", {'name': name})\n new_state.read = 'yes'\n if not variable.in_scope:\n self.name_map[current_path][variable.scoped_name] = new_state\n else:\n self.name_map[current_path][full_name] = new_state\n return new_state\n\n def load_module(self, chain):\n \"\"\"\n Finds the module in the set of available modules.\n\n Args:\n chain (str): A chain of module imports (e.g., \"matplotlib.pyplot\")\n Returns:\n ModuleType: The specific module with its members, or an empty\n module type.\n \"\"\"\n module_names = chain.split('.')\n potential_module = get_builtin_module(module_names[0])\n if potential_module is not None:\n base_module = potential_module\n for module in module_names:\n if (isinstance(base_module, ModuleType) and\n module in base_module.submodules):\n base_module = base_module.submodules[module]\n else:\n self.report_issue(\"Module not found\", {\"name\": chain})\n return base_module\n else:\n try:\n actual_module = __import__(chain, globals(), {},\n ['_tifa_definitions'])\n definitions = actual_module._tifa_definitions()\n return type_from_json(definitions)\n except Exception as e:\n self.report_issue(\"Module not found\",\n {\"name\": chain, \"error\": str(e)})\n return ModuleType()\n\n def combine_states(self, left, right):\n state = State(left.name, [left], left.type, 'branch', self.locate(),\n read=left.read, set=left.set, over=left.over,\n over_position=left.over_position)\n if right is None:\n state.read = 'no' if left.read == 'no' else 'maybe'\n state.set = 'no' if left.set == 'no' else 'maybe'\n state.over = 'no' if left.over == 'no' else 'maybe'\n else:\n if not are_types_equal(left.type, right.type):\n self.report_issue(\"Type changes\", {'name': left.name,\n 'old': left.type,\n 'new': right.type})\n state.read = match_rso(left.read, right.read)\n state.set = match_rso(left.set, right.set)\n state.over = match_rso(left.over, right.over)\n if left.over == 'no':\n state.over_position = right.over_position\n state.trace.append(right)\n return state\n\n def merge_paths(self, parent_path_id, left_path_id, right_path_id):\n \"\"\"\n Combines any variables on the left and right path into the parent\n name space.\n\n Args:\n parent_path_id (int): The parent path of the left and right branches\n left_path_id (int): One of the two paths\n right_path_id (int): The other of the two paths.\n \"\"\"\n # Combine two paths into one\n # Check for any names that are on the IF path\n for left_name in 
self.name_map[left_path_id]:\n left_state = self.name_map[left_path_id][left_name]\n right_identifier = self.find_path_parent(right_path_id, left_name)\n if right_identifier.exists:\n # Was on both IF and ELSE path\n right_state = right_identifier.state\n else:\n # Was only on IF path, potentially on the parent path\n right_state = self.name_map[parent_path_id].get(left_name)\n combined = self.combine_states(left_state, right_state)\n self.name_map[parent_path_id][left_name] = combined\n # Check for names that are on the ELSE path but not the IF path\n for right_name in self.name_map[right_path_id]:\n if right_name not in self.name_map[left_path_id]:\n right_state = self.name_map[right_path_id][right_name]\n # Potentially on the parent path\n parent_state = self.name_map[parent_path_id].get(right_name)\n combined = self.combine_states(right_state, parent_state)\n self.name_map[parent_path_id][right_name] = combined\n\n def trace_state(self, state, method, position):\n \"\"\"\n Makes a copy of the given state with the given method type.\n\n Args:\n state (State): The state to copy (as in, we trace a copy of it!)\n method (str): The operation being applied to the state.\n Returns:\n State: The new State\n \"\"\"\n return state.copy(method, position)\n\n def get_literal(self, node):\n if isinstance(node, ast.Num):\n return LiteralNum(node.n)\n elif isinstance(node, ast.Str):\n return LiteralStr(node.s)\n elif isinstance(node, ast.Tuple):\n values = []\n for elt in node.elts:\n subvalue = self.get_literal(elt)\n if subvalue is not None:\n values.append(subvalue)\n else:\n return None\n return LiteralTuple(values)\n elif isinstance(node, ast.Name):\n if node.id == \"None\":\n return LiteralNone()\n elif node.id == \"False\":\n return LiteralBool(False)\n elif node.id == \"True\":\n return LiteralBool(True)\n return None\n","src/lib/pedal/tifa/__init__.py":"\"\"\"\nPython Type Inferencer and Flow Analyzer (TIFA)\n\nTIFA uses a number of simplifications of the Python language.\n * Variables cannot change type\n * Variables cannot be deleted\n * Complex types have to be homogenous\n * No introspection or reflective characteristics\n * No dunder methods\n * No closures (maybe?)\n * You cannot write a variable out of scope\n * You cannot read a mutable variable out of scope\n * No multiple inheritance\n\nAdditionally, it reads the following as issues:\n * Cannot read a variable without having first written to it.\n * Cannot rewrite a variable unless it has been read.\n\nImportant concepts:\n\n.. glossary::\n\n Issue\n A problematic situation in the submitted code that will be reported\n but may not stop the execution. However, when an Issue occurs,\n any results may be invalid.\n\n Error\n A situation in execution that terminates the program.\n\n Name\n A name of a variable\n\n Scope\n The context of a function, with its own namespaces. Represented\n internally using numeric IDs (Scope IDs).\n\n Scope Chain\n A stack of scopes, with the innermost scope on top.\n\n Fully Qualified Name\n A string representation of a variable and its scope\n chain, written using \"/\". For example: 0/1/4/my_variable_name\n\n Path\n A single path of execution through the control flow; every program\n has at least one sequential path, but IFs, FORs, WHILEs, etc. can\n cause multiple paths. 
Paths are represented using numeric IDs (Path\n IDs).\n\n State\n Information about a Name that indicates things like the variable's\n current type and whether that name has been read, set, or\n overwritten.\n\n Identifier\n A wrapper around variables, used to hold their potential\n non-existence (which is an Issue but not an Error).\n\n Type\n A symbolic representation of the variable's type.\n\n Literal\n Sometimes, we need a specialized representation of a literal value\n to be passed around. This is particularly important for accessing\n elements in an tuples.\n\n Name Map\n (Path x Fully Qualified Names) => States\n\"\"\"\n\nfrom pedal.tifa.tifa import Tifa\nfrom pedal.report import MAIN_REPORT\n\nNAME = 'TIFA'\nSHORT_DESCRIPTION = \"Finds common issues caused by students.\"\nDESCRIPTION = '''Python Type Inferencer and Flow Analyzer (TIFA)\n\nTifa traverses an AST to detect common issues made by students.\n'''\nREQUIRES = ['Source']\nOPTIONALS = []\n\n\ndef tifa_analysis(python_3=True, report=None):\n \"\"\"\n Perform the TIFA analysis and attach the results to the Report.\n\n Args:\n python_3 (bool): Whether to expect a Python3 formated file, or Python\n 2. This has slight nuance on certain AST elements.\n report (:class:`Report`): The Report object to attach results to.\n Defaults to :data:`MAIN_REPORT`.\n \"\"\"\n if report is None:\n report = MAIN_REPORT\n t = Tifa(python_3=python_3, report=report)\n t.process_code(report['source']['code'])\n return t\n\n\n__all__ = ['NAME', 'DESCRIPTION', 'SHORT_DESCRIPTION',\n 'REQUIRES', 'OPTIONALS',\n 'tifa_analysis', 'Tifa']\n","src/lib/pedal/toolkit/files.py":"from pedal.cait.cait_api import parse_program\nfrom pedal.report.imperative import explain\nfrom pedal.toolkit.utilities import ensure_literal\n\n\ndef files_not_handled_correctly(*filenames):\n \"\"\"\n Statically detect if files have been opened and closed correctly.\n This is only useful in the case of very simplistic file handling.\n \"\"\"\n if filenames and isinstance(filenames[0], int):\n num_filenames = filenames[0]\n actual_filenames = False\n else:\n num_filenames = len(filenames)\n actual_filenames = True\n ast = parse_program()\n calls = ast.find_all(\"Call\")\n called_open = []\n closed = []\n for a_call in calls:\n if a_call.func.ast_name == 'Name':\n if a_call.func.id == 'open':\n if not a_call.args:\n explain(\"You have called the open
function \"\n \"without any arguments. It needs a filename.\")\n return True\n called_open.append(a_call)\n elif a_call.func.id == 'close':\n explain(\"You have attempted to call close
as a \"\n \"function, but it is actually a method of the \"\n \"file object.\", 'verifier')\n return True\n elif a_call.func.ast_name == 'Attribute':\n if a_call.func.attr == 'open':\n explain(\"You have attempted to call open
as a \"\n \"method, but it is actually a built-in function.\")\n return True\n elif a_call.func.attr == 'close':\n closed.append(a_call)\n if len(called_open) < num_filenames:\n explain(\"You have not opened all the files you were supposed to.\")\n return True\n elif len(called_open) > num_filenames:\n explain(\"You have opened more files than you were supposed to.\")\n return True\n withs = ast.find_all(\"With\")\n if len(withs) + len(closed) < num_filenames:\n explain(\"You have not closed all the files you were supposed to.\")\n return True\n elif len(withs) + len(closed) > num_filenames:\n explain(\"You have closed more files than you were supposed to.\")\n return True\n if actual_filenames:\n return ensure_literal(*filenames)\n return False\n","src/lib/pedal/toolkit/functions.py":"from pedal.cait.cait_api import parse_program\nfrom pedal.report.imperative import gently, explain, gently_r, explain_r, MAIN_REPORT\nfrom pedal.sandbox import compatibility\nimport ast\n\nfrom pedal.toolkit.signatures import type_check, parse_type, normalize_type\n\nDELTA = 0.001\n\n\ndef all_documented():\n ast = parse_program()\n defs = ast.find_all('FunctionDef') + ast.find_all(\"ClassDef\")\n for a_def in defs:\n if a_def.name == \"__init__\":\n continue\n if (a_def.body and\n (a_def.body[0].ast_name != \"Expr\" or\n a_def.body[0].value.ast_name != \"Str\")):\n if a_def.ast_name == 'FunctionDef':\n gently(\"You have an undocumented function: \" + a_def.name)\n else:\n gently(\"You have an undocumented class: \" + a_def.name)\n return False\n return True\n\n\ndef get_arg_name(node):\n name = node.id\n if name is None:\n return node.arg\n else:\n return name\n\n\ndef match_function(name, root=None):\n if root is None:\n ast = parse_program()\n else:\n ast = root\n defs = ast.find_all('FunctionDef')\n for a_def in defs:\n if a_def._name == name:\n return a_def\n return None\n \ndef match_signature_muted(name, length, *parameters):\n ast = parse_program()\n defs = ast.find_all('FunctionDef')\n for a_def in defs:\n if a_def._name == name:\n found_length = len(a_def.args.args)\n if found_length != length:\n return None\n elif parameters:\n for parameter, arg in zip(parameters, a_def.args.args):\n arg_name = get_arg_name(arg)\n if arg_name != parameter:\n return None\n else:\n return a_def\n else:\n return a_def\n return None\n\n\ndef find_def_by_name(name, root=None):\n if root is None:\n root = parse_program()\n defs = root.find_all('FunctionDef')\n for a_def in defs:\n if a_def._name == name:\n return a_def\n return None\n\n\ndef match_parameters(name, *types, returns=None, root=None):\n defn = find_def_by_name(name, root)\n if defn:\n for expected, actual in zip(types, defn.args.args):\n if actual.annotation:\n if not isinstance(expected, str):\n expected = expected.__name__\n actual_type = parse_type(actual.annotation)\n if not type_check(expected, actual_type):\n gently_r(\"Error in definition of function `{}` parameter `{}`. Expected `{}`, \"\n \"instead found `{}`.\".format(name, actual.arg, expected, actual_type),\n \"wrong_parameter_type\")\n return None\n else:\n if returns is not None:\n if not isinstance(returns, str):\n returns = returns.__name__\n if defn.returns:\n actual_type = parse_type(defn.returns)\n if not type_check(returns, actual_type):\n gently_r(\"Error in definition of function `{}` return type. Expected `{}`, \"\n \"instead found {}.\".format(name, returns, actual_type),\n \"wrong_return_type\")\n return None\n else:\n gently_r(\"Error in definition of function `{}` return type. 
Expected `{}`, \"\n \"but there was no return type specified.\".format(name, returns),\n \"missing_return_type\")\n return None\n return defn\n\n\n\ndef match_signature(name, length, *parameters):\n ast = parse_program()\n defs = ast.find_all('FunctionDef')\n for a_def in defs:\n if a_def._name == name:\n found_length = len(a_def.args.args)\n if found_length < length:\n gently_r(\"The function named {} has fewer parameters ({}) \"\n \"than expected ({}). \".format(name, found_length, length), \"insuff_args\")\n elif found_length > length:\n gently_r(\"The function named {} has more parameters ({}) \"\n \"than expected ({}). \".format(name, found_length, length), \"excess_args\")\n elif parameters:\n for parameter, arg in zip(parameters, a_def.args.args):\n arg_name = get_arg_name(arg)\n if arg_name != parameter:\n gently_r(\"Error in definition of {}. Expected a parameter named {}, \"\n \"instead found {}.\".format(name, parameter, arg_name), \"name_missing\")\n return None\n else:\n return a_def\n else:\n return a_def\n else:\n gently_r(\"No function named {name} was found.\".format(name=name),\n \"missing_func_{name}\".format(name=name))\n return None\n\n\nGREEN_CHECK = \"✔ | \"\nRED_X = \"❌ | \"\n\n\ndef output_test(name, *tests):\n student = compatibility.get_student_data()\n if name in student.data:\n the_function = student.data[name]\n if callable(the_function):\n result = (\"\"\n \" | Arguments | Expected | Actual |
\"\n )\n success = True\n success_count = 0\n for test in tests:\n inp = test[:-1]\n inputs = ', '.join([\"{}
\".format(repr(i)) for i in inp])\n out = test[-1]\n tip = \"\"\n if isinstance(out, tuple):\n tip = out[1]\n out = out[0]\n message = \"{} | \" + (\"{} | \" * 2)\n test_out = compatibility.capture_output(the_function, *inp)\n if isinstance(out, str):\n if len(test_out) < 1:\n message = message.format(inputs, repr(out), \"No output\", tip)\n message = \"\" + RED_X + message + \"
\"\n if tip:\n message += \"\" + tip + \" |
\"\n success = False\n elif len(test_out) > 1:\n message = message.format(inputs, repr(out), \"Too many outputs\", tip)\n message = \"\" + RED_X + message + \"
\"\n if tip:\n message += \"\" + tip + \" |
\"\n success = False\n elif out not in test_out:\n message = message.format(inputs, repr(out), repr(test_out[0]), tip)\n message = \"\" + RED_X + message + \"
\"\n if tip:\n message += \"\" + tip + \" |
\"\n success = False\n else:\n message = message.format(inputs, repr(out), repr(test_out[0]), tip)\n message = \"\" + GREEN_CHECK + message + \"
\"\n success_count += 1\n elif out != test_out:\n if len(test_out) < 1:\n message = message.format(inputs, repr(out), \"No output\", tip)\n else:\n message = message.format(inputs, repr(out), repr(test_out[0]), tip)\n message = \"\" + RED_X + message + \"
\"\n if tip:\n message += \"\" + tip + \" |
\"\n success = False\n else:\n message = message.format(inputs, repr(out), repr(test_out[0]), tip)\n message = \"\" + GREEN_CHECK + message + \"
\"\n success_count += 1\n result += message\n if success:\n return the_function\n else:\n result = (\"I ran your function {}
on some new arguments, and it gave the wrong output \"\n \"{}/{} times.\".format(name, len(tests) - success_count, len(tests)) + result)\n gently_r(result + \"
\", \"wrong_output\")\n return None\n else:\n gently_r(\"You defined {}, but did not define it as a function.\".format(name), \"not_func_def\")\n return None\n else:\n gently_r(\"The function {}
was not defined.\".format(name), \"no_func_def\")\n return None\n\n\ndef unit_test(name, *tests):\n \"\"\"\n Show a table\n :param name:\n :param tests:\n :return:\n \"\"\"\n student = compatibility.get_student_data()\n if name in student.data:\n the_function = student.data[name]\n if callable(the_function):\n result = (\"\"\n \" | Arguments | Returned | Expected |
\"\n )\n success = True\n success_count = 0\n for test in tests:\n inp = test[:-1]\n inputs = ', '.join([\"{}
\".format(repr(i)) for i in inp])\n out = test[-1]\n tip = \"\"\n if isinstance(out, tuple):\n tip = out[1]\n out = out[0]\n message = (\"{} | \" * 3)\n test_out = the_function(*inp)\n message = message.format(inputs, repr(test_out), repr(out))\n if (isinstance(out, float) and\n isinstance(test_out, (float, int)) and\n abs(out - test_out) < DELTA):\n message = \"\" + GREEN_CHECK + message + \"
\"\n success_count += 1\n elif out != test_out:\n # gently(message)\n message = \"\" + RED_X + message + \"
\"\n if tip:\n message += \"\" + tip + \" |
\"\n success = False\n else:\n message = \"\" + GREEN_CHECK + message + \"
\"\n success_count += 1\n result += message\n if success:\n return the_function\n else:\n result = \"I ran your function {}
on some new arguments, \" \\\n \"and it failed {}/{} tests.\".format(name, len(tests) - success_count, len(tests)) + result\n gently_r(result + \"
\", \"tests_failed\")\n return None\n else:\n gently(\"You defined {}, but did not define it as a function.\".format(name))\n return None\n else:\n gently(\"The function {}
was not defined.\".format(name))\n return None\n\n\nclass _LineVisitor(ast.NodeVisitor):\n \"\"\"\n NodeVisitor subclass that visits every statement of a program and tracks\n their line numbers in a list.\n \n Attributes:\n lines (list[int]): The list of lines that were visited.\n \"\"\"\n\n def __init__(self):\n self.lines = []\n\n def _track_lines(self, node):\n self.lines.append(node.lineno)\n self.generic_visit(node)\n\n visit_FunctionDef = _track_lines\n visit_AsyncFunctionDef = _track_lines\n visit_ClassDef = _track_lines\n visit_Return = _track_lines\n visit_Delete = _track_lines\n visit_Assign = _track_lines\n visit_AugAssign = _track_lines\n visit_AnnAssign = _track_lines\n visit_For = _track_lines\n visit_AsyncFor = _track_lines\n visit_While = _track_lines\n visit_If = _track_lines\n visit_With = _track_lines\n visit_AsyncWith = _track_lines\n visit_Raise = _track_lines\n visit_Try = _track_lines\n visit_Assert = _track_lines\n visit_Import = _track_lines\n visit_ImportFrom = _track_lines\n visit_Global = _track_lines\n visit_Nonlocal = _track_lines\n visit_Expr = _track_lines\n visit_Pass = _track_lines\n visit_Continue = _track_lines\n visit_Break = _track_lines\n\n\ndef check_coverage(report=None):\n \"\"\"\n Checks that all the statements in the program have been executed.\n This function only works when a tracer_style has been set in the sandbox,\n or you are using an environment that automatically traces calls (e.g.,\n BlockPy).\n \n TODO: Make compatible with tracer_style='coverage'\n \n Args:\n report (Report): The Report to draw source code from; if not given,\n defaults to MAIN_REPORT.\n Returns:\n bool or set[int]: If the source file was not parsed, None is returned.\n If there were fewer lines traced in execution than are found in\n the AST, then the set of unexecuted lines are returned. Otherwise,\n False is returned.\n \"\"\"\n if report is None:\n report = MAIN_REPORT\n if not report['source']['success']:\n return None, 0\n lines_executed = set(compatibility.trace_lines())\n if -1 in lines_executed:\n lines_executed.remove(-1)\n student_ast = report['source']['ast']\n visitor = _LineVisitor()\n visitor.visit(student_ast)\n lines_in_code = set(visitor.lines)\n if lines_executed < lines_in_code:\n return lines_in_code - lines_executed, len(lines_executed)/len(lines_in_code)\n else:\n return False, 1\n\n\ndef ensure_coverage(percentage=.5, destructive=False, report=None):\n \"\"\"\n Note that this avoids destroying the current sandbox instance stored on the\n report, if there is one present.\n \n Args:\n destructive (bool): Whether or not to remove the sandbox.\n \"\"\"\n if report is None:\n report = MAIN_REPORT\n student_code = report['source']['code']\n unexecuted_lines, percent_covered = check_coverage(report)\n if unexecuted_lines:\n if percent_covered <= percentage:\n gently(\"Your code coverage is not adequate. You must cover at least half your code to receive feedback.\")\n return False\n return True\n\n\ndef ensure_cisc108_tests(test_count, report=None):\n student = compatibility.get_student_data()\n if 'assert_equal' not in student.data:\n gently(\"You have not imported assert_equal from the cisc108 module.\")\n return False\n assert_equal = student.data['assert_equal']\n if not hasattr(assert_equal, 'student_tests'):\n gently(\"The assert_equal function has been modified. 
Do not let it be overwritten!\",\n label=\"Assertion Function Corrupted\")\n return False\n student_tests = assert_equal.student_tests\n if student_tests.tests == 0:\n gently(\"You are not unit testing the result.\", label=\"No Student Unit Tests\")\n return False\n elif student_tests.tests < test_count:\n gently(\"You have not written enough unit tests.\", label=\"Not Enough Student Unit Tests\")\n return False\n elif student_tests.failures > 0:\n gently(\"Your unit tests are not passing.\", label=\"Student Unit Tests Failing\")\n return False\n return True\n","src/lib/pedal/toolkit/imports.py":"from pedal.cait.cait_api import parse_program\nfrom pedal.report.imperative import explain\n\n\ndef ensure_imports(*modules):\n ast = parse_program()\n for module in modules:\n imports = ast.find_all(\"Import\")\n import_froms = ast.find_all(\"ImportFrom\")\n if not imports and not import_froms:\n explain(\"You need to import the {}
module.\".format(module))\n return True\n success = False\n if imports:\n if any(alias._name == module\n for i in imports\n for alias in i.names):\n success = True\n if import_froms:\n if any(i.module == module for i in import_froms):\n success = True\n if not success:\n explain(\"You need to import the {}
module.\".format(module))\n return True\n return False\n","src/lib/pedal/toolkit/plotting.py":"from pedal.toolkit.utilities import function_is_called\nfrom pedal.cait.cait_api import parse_program, def_use_error\nfrom pedal.report.imperative import gently, explain_r, gently_r\nfrom pedal.sandbox import compatibility\n\nPLOT_LABEL = {'plot': 'line plot',\n 'hist': 'histogram',\n 'scatter': 'scatter plot'}\n\n\ndef prevent_incorrect_plt():\n ast = parse_program()\n plts = [n for n in ast.find_all(\"Name\") if n.id == 'plt']\n if plts and def_use_error(plts[0]):\n # TODO: I converted this to the explain_r function, but I wasn't sure about the priority thing ~Luke Gusukuma\n # explain(\"You have imported the matplotlib.pyplot
module, \"\n # \"but you did not rename it to plt
using \"\n # \"import matplotlib.pyplot as plt
.
(plt_rename_err)\", 'verifier')\n explain_r(\"You have imported the matplotlib.pyplot
module, \"\n \"but you did not rename it to plt
using \"\n \"import matplotlib.pyplot as plt
.\",\n \"plt_rename_err\",\n priority='verifier')\n return True\n matplotlib_names = ['plot', 'hist', 'scatter',\n 'title', 'xlabel', 'ylabel', 'show']\n for name in matplotlib_names:\n for n in ast.find_all(\"Name\"):\n if n.id == name:\n if def_use_error(n):\n # explain((\"You have attempted to use the MatPlotLib \"\n # \"function named {0}
. However, you \"\n # \"imported MatPlotLib in a way that does not \"\n # \"allow you to use the function directly. I \"\n # \"recommend you use plt.{0} instead, \"\n # \"after you use import matplotlib.pyplot as \"\n # \"plt. (plt_wrong_import)\").format(name), 'verifier')\n explain_r((\"You have attempted to use the MatPlotLib \"\n \"function named {0}. However, you \"\n \"imported MatPlotLib in a way that does not \"\n \"allow you to use the function directly. I \"\n \"recommend you use plt.{0} instead, \"\n \"after you use import matplotlib.pyplot as \"\n \"plt
.\").format(name),\n \"plt_wrong_import\",\n priority='verifier')\n return True\n return False\n\n\ndef ensure_correct_plot(function_name):\n for a_plot, label in PLOT_LABEL.items():\n if function_name == a_plot:\n if not function_is_called(function_name):\n gently_r(\"You are not calling the {func_name}
function.\".format(func_name=function_name),\n \"no_{func_name}_call\".format(func_name=function_name))\n return True\n elif function_is_called(a_plot):\n gently_r(\"You have called the {}
function, which makes a {}.\".format(a_plot, label),\n \"wrong_plt\")\n return True\n return False\n\n\ndef ensure_show():\n if not function_is_called(\"show\"):\n gently_r(\"You have not called show
function, which \"\n \"actually creates the graph.\", \"no_show\")\n return True\n return False\n\n\ndef compare_data(plt_type, correct, given):\n \"\"\"\n Determines whether the given data matches any of the data found in the\n correct data. This handles plots of different types: if a histogram\n was plotted with the expected data for a line plot, it will return True.\n\n Args:\n plt_type (str): The expected type of this plot\n correct (List of Int or List of List of Int): The expected data.\n given (Dict): The actual plotted data and information\n Returns:\n bool: Whether the correct data was found in the given plot.\n \"\"\"\n # Infer arguments\n if plt_type == 'hist':\n correct_xs = None\n correct_ys = correct\n elif not correct:\n correct_xs = []\n correct_ys = []\n elif isinstance(correct[0], (tuple, list)):\n # We were given a list of lists of ints\n correct_xs, correct_ys = correct\n else:\n # Assume it is a singular list\n correct_xs = list(range(len(correct)))\n correct_ys = correct\n\n if given['type'] == 'hist':\n return correct_ys == given['values']\n elif plt_type == 'hist':\n return correct_ys == given['y']\n else:\n return correct_xs == given['x'] and correct_ys == given['y']\n\n\nGRAPH_TYPES = {'line': 'line plot',\n 'hist': 'histogram',\n 'scatter': 'scatter plot'}\n\n\ndef check_for_plot(plt_type, data):\n \"\"\"\n Returns any errors found for this plot type and data.\n In other words, if it returns False, the plot was found correctly.\n \"\"\"\n if plt_type == 'plot':\n plt_type = 'line'\n type_found = False\n data_found = False\n for graph in compatibility.get_plots():\n for a_plot in graph['data']:\n data_found_here = compare_data(plt_type, data, a_plot)\n if a_plot['type'] == plt_type and data_found_here:\n return False\n if a_plot['type'] == plt_type:\n type_found = True\n if data_found_here:\n data_found = True\n plt_type = GRAPH_TYPES.get(plt_type, plt_type)\n if type_found and data_found:\n return (\"You have created a {}, but it does not have the right data. That data appears to have been plotted \"\n \"in another graph.
(other_plt)\".format(plt_type))\n elif type_found:\n return (\"You have created a {}, but it does not have the right data.\"\n \"
(wrong_plt_data)\".format(plt_type))\n elif data_found:\n return (\"You have plotted the right data, but you appear to have not plotted it as a {}.\"\n \"
(wrong_plt_type)\".format(plt_type))\n else:\n return (\"You have not created a {} with the proper data.\"\n \"
(no_plt)\".format(plt_type))\n\n\ndef check_for_plot_r(plt_type, data):\n \"\"\"\n Returns any errors found for this plot type and data.\n In other words, if it returns False, the plot was found correctly.\n \"\"\"\n if plt_type == 'plot':\n plt_type = 'line'\n type_found = False\n data_found = False\n for graph in compatibility.get_plots():\n for a_plot in graph['data']:\n data_found_here = compare_data(plt_type, data, a_plot)\n if a_plot['type'] == plt_type and data_found_here:\n return False\n if a_plot['type'] == plt_type:\n type_found = True\n if data_found_here:\n data_found = True\n plt_type = GRAPH_TYPES.get(plt_type, plt_type)\n if type_found and data_found:\n return {\"message\": \"You have created a {}, but it does not have the right data. \"\n \"That data appears to have been plotted in another graph.\".format(plt_type),\n \"code\": \"other_plt\",\n \"label\": \"Plotting Another Graph\"}\n elif type_found:\n return {\"message\": \"You have created a {}, but it does not have the right data.\".format(plt_type),\n \"code\": \"wrong_plt_data\",\n \"label\": \"Plot Data Incorrect\"}\n elif data_found:\n return {\"message\": \"You have plotted the right data, but you appear to have not plotted it as a {}.\".format(plt_type),\n \"code\": \"wrong_plt_type\",\n \"label\": \"Wrong Plot Type\"\n }\n else:\n return {\"message\": \"You have not created a {} with the proper data.\".format(plt_type),\n \"code\": \"no_plt\",\n \"label\": \"Missing Plot\"}\n","src/lib/pedal/toolkit/printing.py":"from pedal.report.imperative import gently_r\nfrom pedal.toolkit.utilities import find_function_calls, is_top_level\n\n\ndef ensure_prints(count):\n prints = find_function_calls('print')\n if not prints:\n gently_r(\"You are not using the print function!\", \"no_print\", label=\"Missing Print\")\n return False\n elif len(prints) > count:\n gently_r(\"You are printing too many times!\", \"multiple_print\", label=\"Too Many Prints\")\n return False\n elif len(prints) < count:\n gently_r(\"You are not printing enough things!\", \"too_few_print\", label=\"Too Few Prints\")\n return False\n else:\n for a_print in prints:\n if not is_top_level(a_print):\n gently_r(\"You have a print function that is not at the top level. That is incorrect for this problem!\",\n \"not_top_level_print\", label=\"Non-Top Level Print\")\n return False\n return prints\n","src/lib/pedal/toolkit/signatures.py":"import re\n\nfrom pedal.cait.cait_api import parse_program\nfrom pedal.report.imperative import gently, explain\n\n\"\"\"\nVerify indentation\n\nFormat:\n\n\nAny number of text. 
One final newline separates the next section.\n\nIf line is \"Args:\" or \"Returns:\"\n Next line will be a \"param (type): Description\" or \"type: Description\"\n If the next line is indented more than current level, then it is part of the previous part's description.\n Otherwise, new entry\n\n\"Note:\"\n Any level of indentation indicates\n\"\"\"\n\nPRIMITIVES = {\n 'text': ['text'],\n 'str': ['string', 'str', 'unicode'],\n 'bytes': ['bytes'],\n 'io': ['io'],\n 'file': ['file'],\n 'num': ['number', 'num', 'numeric'],\n 'int': ['int', 'integer'],\n 'float': ['float', 'floating'],\n 'bool': ['bool', 'boolean'],\n 'none': ['none'],\n 'any': ['any']\n}\nNORMALIZE_PRIMITIVES = {synonym: formal\n for formal, synonyms in PRIMITIVES.items()\n for synonym in synonyms}\nCONTAINERS = {\n 'list': (1, ['list']),\n 'set': (1, ['set']),\n 'optional': (1, ['optional', 'maybe']),\n 'dict': (2, ['dict', 'dictionary']),\n 'callable': (2, ['callable', 'function', 'func']),\n 'union': ('*', ['union', 'itemization']),\n 'tuple': ('*', ['tuple', 'pair']),\n}\nNORMALIZE_CONTAINERS = {synonym: formal\n for formal, (length, synonyms) in CONTAINERS.items()\n for synonym in synonyms}\n\nINHERITANCE = {\n 'int': 'num',\n 'float': 'num',\n 'bool': 'num',\n 'str': 'text',\n 'bytes': 'text',\n 'list': 'iterable',\n 'tuple': 'iterable',\n 'set': 'iterable',\n 'dict': 'iterable',\n 'file': 'iterable',\n 'text': 'iterable'\n}\n\nSPECIAL_PARAMETERS = [\"_returns\", \"yields\", \"prints\", \"_raises\",\n \"_report\", \"_root\"]\n\n'''\nType validation:\n Caps does not matter\n Primitives:\n Containers\n Unions\n X or Y\n X, Y, or Z\n X, Y, Z\n Function\n (X -> Y)\n \n list[int, str, or bool], dict[int: str], or bool or int\n'''\n\ndef parse_type_slice(slice):\n if slice.ast_name == \"Index\":\n return parse_type(slice.index)\n elif slice.ast_name == \"Slice\":\n return \"{}:{}\".format(parse_type(slice.lower), parse_type(slice.upper))\n elif slice.ast_name == \"ExtSlice\":\n return \", \".join(parse_type_slice(s) for s in slice.dims)\n\ndef parse_type(node):\n if node.ast_name == \"Str\":\n return node.s\n elif node.ast_name == \"Name\":\n return node.id\n elif node.ast_name == \"NameConstant\":\n return node.value\n elif node.ast_name == \"List\":\n return \"list[{}]\".format(\", \".join([parse_type(n) for n in node.elts]))\n elif node.ast_name == \"Dict\":\n return \"dict[{}]\".format(\", \".join([\"{}: {}\".format(parse_type(k), parse_type(v))\n for k,v in zip(node.keys, node.values)]))\n elif node.ast_name == \"Subscript\":\n return parse_type(node.value) + \"[{}]\".format(parse_type_slice(node.slice))\n elif node.ast_name == \"BoolOp\":\n if node.op.ast_name == \"Or\":\n return \" or \".join(parse_type(v) for v in node.values)\n return \"?\"\n\n\nclass SignatureException(Exception):\n pass\n\n\nclass Stack:\n def __init__(self, identifier=\"union\"):\n self.body = []\n self.identifier = identifier\n\n def append(self, value):\n self.body.append(value)\n\n def __repr__(self):\n return \"{}[{}]\".format(self.identifier, \", \".join(map(repr, self.body)))\n\n def __hash__(self):\n return hash(tuple(self.identifier, self.body))\n\n def __lt__(self, other):\n if isinstance(other, Stack):\n return self.identifier < other.identifier and self.body < other.body\n return self.identifier < other\n\n def __gt__(self, other):\n if isinstance(other, Stack):\n return self.identifier > other.identifier and self.body > other.body\n return self.identifier > other\n\n def __eq__(self, other):\n if isinstance(other, Stack):\n return 
self.identifier == other.identifier and self.body == other.body\n return False\n\n\ndef _normalize_identifier(identifier):\n if identifier in NORMALIZE_PRIMITIVES:\n return NORMALIZE_PRIMITIVES[identifier]\n elif identifier in NORMALIZE_CONTAINERS:\n return NORMALIZE_CONTAINERS[identifier]\n else:\n return identifier\n\n\nSPECIAL_SYMBOLS = r\"\\s*(->|\\s*[\\[\\],\\(\\)\\:]|or)\\s*\"\n\n\ndef _parse_tokens(tokens):\n result_stack = [Stack()]\n tokens = list(reversed(list(tokens)))\n while tokens:\n current = tokens.pop()\n # Ending a parenthetical, better stop here.\n if current == \")\":\n subexpression = result_stack.pop()\n result_stack[-1].append(subexpression)\n # Ending a square bracket, better stop here.\n elif current == \"]\":\n subexpression = result_stack.pop()\n result_stack[-1].append(subexpression)\n # We've reached the last token!\n elif not tokens:\n # And had no tokens before this one\n # Return the set of tokens\n result_stack[-1].append(_normalize_identifier(current))\n # Starting a parentheized expression\n elif current == \"(\":\n result_stack.append(Stack())\n # Nullary function\n elif current == \"->\":\n result_stack[-1].append(Stack(\"callable\"))\n elif current in (\"or\", \",\", \":\"):\n pass\n else:\n next = tokens.pop()\n # X or ...\n if current == \",\" and next == \"or\":\n tokens.append(next)\n if next in (\"or\", \",\", \"->\", \":\"):\n result_stack[-1].append(_normalize_identifier(current))\n # X [ ...\n elif next == \"[\":\n result_stack.append(Stack(_normalize_identifier(current)))\n else:\n tokens.append(next)\n result_stack[-1].append(_normalize_identifier(current))\n return result_stack.pop()\n\n\ndef sort_stacks(s):\n if isinstance(s, Stack):\n return (True, (s.identifier, s.body))\n return (False, s)\n\n\ndef normalize_type(t):\n t = t.strip()\n tokens = re.split(SPECIAL_SYMBOLS, t)\n tokens = [token for token in tokens if token]\n parsed = _parse_tokens(tokens)\n return parsed\n\n\ndef check_piece(left, right, indent=1):\n if type(left) != type(right):\n return False\n elif isinstance(left, Stack):\n if left.identifier != right.identifier:\n return False\n elif len(left.body) != len(right.body):\n return False\n elif left.identifier == \"union\":\n # Handle them in any order\n left.body.sort(key=sort_stacks)\n right.body.sort(key=sort_stacks)\n # Match them in exact order\n for l, r in zip(left.body, right.body):\n if not check_piece(l, r, indent=indent + 1):\n return False\n return True\n else:\n return left == right\n\n\ndef type_check(left, right):\n left = normalize_type(left)\n right = normalize_type(right)\n return check_piece(left, right)\n \ndef find_colon(str):\n parens_stack = []\n for i, character in enumerate(str):\n if character in '[(':\n parens_stack.append(character)\n elif character in '])':\n parens_stack.pop()\n elif character == ':' and not parens_stack:\n return i\n return 0\n \nARGS = ('args:', 'arg:', 'argument:', 'arguments:',\n 'parameters:', 'params:', 'parameter:', 'param:')\nARG_PATTERN = r'(.+)\\s*\\((.+)\\)\\s*:(.+)'\nRETURNS = ('returns:', 'return:')\ndef parse_docstring(doc):\n # First line's indentation may be different from rest - trust first\n # non empty line after the first one.\n # Remove taht number of spaces from subsequent lines\n # If Line is \"Args:\" or other special...\n # \n lines = doc.split(\"\\n\")\n body = [lines[0]]\n args = {}\n current_arg = None\n returns = []\n current_component = 'body'\n indentation = None\n inner_indentation = None\n for line in lines[1:]:\n # Blank line, not interesting!\n 
if not line.strip():\n continue\n # Get the actual text\n if indentation is None:\n indentation = len(line) - len(line.lstrip())\n line = line[indentation:]\n potential_command = line.lower().strip()\n # New command region?\n if potential_command in ARGS:\n current_component = 'args'\n inner_indentation = None\n continue\n elif potential_command in RETURNS:\n current_component = 'returns'\n inner_indentation = None\n continue\n # Okay, it's content - let's process it\n if current_component == 'body':\n body.append(line)\n else:\n if inner_indentation is None:\n inner_indentation = len(line) - len(line.lstrip())\n line = line[inner_indentation:]\n # Skip indented lines\n if not re.match(r'\\s', line):\n if current_component == 'args':\n match = re.search(ARG_PATTERN, line)\n current_arg = match.group(1)\n type_str = match.group(2)\n args[current_arg.strip()] = type_str.strip()\n elif current_component == 'returns':\n position = find_colon(line)\n return_type, comment = line[:position], line[position:]\n returns.append(return_type.strip())\n return body, args, ' or '.join(returns)\n\ndef function_signature(function_name, returns=None, yields=None,\n prints=None, raises=None, report=None, root=None,\n **kwargs):\n \"\"\"\n Determines whether the function with this signature is in the AST.\n \n TODO: Implement raises, prints, yields\n \"\"\"\n if root is None:\n root = parse_program()\n # If you encounter any special parameters with a \"_\", then fix their\n # name. This allows for students to have parameters with the given name.\n for special_parameter in SPECIAL_PARAMETERS:\n if special_parameter in kwargs:\n kwargs[special_parameter[1:]] = kwargs.pop(special_parameter)\n # Go get the actual docstring, parse it\n docstring = None\n for function_def in root.find_all(\"FunctionDef\"):\n if function_def._name == function_name:\n if function_def.body:\n if (function_def.body[0].ast_name == \"Expr\" and\n function_def.body[0].value.ast_name == \"Str\"):\n docstring = function_def.body[0].value.s\n # Try to match each element in turn.\n if docstring is None:\n return False\n\n try:\n body, args, parsed_returns = parse_docstring(docstring)\n except Exception as e:\n return [e], False\n failing_parameters = []\n for name, type in kwargs.items():\n if name in args:\n if not type_check(type, args[name]):\n failing_parameters.append(name)\n else:\n failing_parameters.append(name)\n if returns is None and not returns:\n return failing_parameters, True\n elif returns is not None and returns:\n return failing_parameters, type_check(parsed_returns, returns)\n else:\n return failing_parameters, False\n \n\ndef class_signature(class_name, report=None, root=None, **attributes):\n \"\"\"\n\n Args:\n class_name:\n **attributes:\n report:\n root:\n\n Returns:\n\n \"\"\"\n if root is None:\n root = parse_program()\n\n\n\"\"\"\n\n\"\"\"\n","src/lib/pedal/toolkit/upload.py":"import re\nfrom pedal.source import get_program\nfrom pedal.sandbox.compatibility import get_output\nfrom pedal.report.imperative import gently, explain\n\n\n# Feedback for author's name\ndef check_author_name_on_header():\n code = get_program()\n m_author = re.search('Author: \\\\w+', code)\n if not m_author:\n gently(\"You need to add your name to the author field at the top of the file.\"\n \"
(name_missing)\")\n\n\ndef get_plots(output):\n # The p[0] is the first plot in a graph/show\n return [p[0] for p in output if isinstance(p[0], dict)]\n\n\ndef find_plot_of_type(plot_list, plot_type):\n return [p['data'] for p in plot_list if p['type'] == plot_type]\n\n\n# Feedback for copying output of the program in the documentation\ndef check_output_on_header(expected_output):\n code = get_program()\n expected_output = str(expected_output)\n between_stars = code.split(\"*****\")[2].strip()\n between_stars = \"\\\\n\".join([x.strip() for x in between_stars.split(\"\\\\n\")])\n if 'REPLACE THIS TEXT WITH THE OUTPUT OF THIS PROGRAM' in between_stars:\n gently(\"In your code, you need to 'REPLACE THIS TEXT WITH THE OUTPUT OF THIS PROGRAM'\"\n \"
(wrong_output_blank)\")\n elif expected_output not in between_stars:\n gently(\"The output you copied between the *****, seems to be incorrect. You may have copied it into the wrong \"\n \"location, or it is incomplete.
(wrong_output_fill)\")\n\n\ndef check_problem_submission(prob_id):\n if prob_id not in get_program():\n explain(\"Make sure that you are turning in {}
(wrong_problem)\".format(prob_id))\n return True\n\n\ndef check_print_output(multiple_lines):\n for line in multiple_lines:\n if line not in get_output():\n gently(\"You are not doing the correct calculation
(catch_all)\")\n return True\n\n\ndef find_in_code(regex):\n code = get_program()\n return re.search(regex, code)\n","src/lib/pedal/toolkit/utilities.py":"from pedal.cait.cait_api import parse_program\nfrom pedal.report.imperative import gently, explain\nfrom pedal.report.imperative import gently_r, explain_r\n\ndef is_top_level(ast_node):\n ast = parse_program()\n for element in ast.body:\n if element.ast_name == 'Expr':\n if element.value == ast_node:\n return True\n elif element == ast_node:\n return True\n return False\n\n\ndef no_nested_function_definitions():\n ast = parse_program()\n defs = ast.find_all('FunctionDef')\n for a_def in defs:\n if not is_top_level(a_def):\n gently(\"You have defined a function inside of another block. For instance, you may have placed it inside \"\n \"another function definition, or inside of a loop. Do not nest your function definition!\"\n \"
(nest_func)\")\n return False\n return True\n\n\ndef function_prints():\n ast = parse_program()\n defs = ast.find_all('FunctionDef')\n for a_def in defs:\n all_calls = a_def.find_all('Call')\n for a_call in all_calls:\n if a_call.func.ast_name == 'Name':\n if a_call.func.id == 'print':\n return True\n return False\n\n\ndef find_function_calls(name, root=None):\n if root is None:\n root = parse_program()\n all_calls = root.find_all('Call')\n calls = []\n for a_call in all_calls:\n if a_call.func.ast_name == 'Attribute':\n if a_call.func.attr == name:\n calls.append(a_call)\n elif a_call.func.ast_name == 'Name':\n if a_call.func.id == name:\n calls.append(a_call)\n return calls\n\n\ndef function_is_called(name):\n return len(find_function_calls(name))\n\n\ndef no_nonlist_nums():\n pass\n\n\ndef only_printing_variables():\n ast = parse_program()\n all_calls = ast.find_all('Call')\n for a_call in all_calls:\n if a_call.func.ast_name == 'Name' and a_call.func.id == \"print\":\n for arg in a_call.args:\n if arg.ast_name != \"Name\":\n return False\n elif arg.id in ('True', 'False', 'None'):\n return False\n return True\n\n\ndef find_prior_initializations(node):\n if node.ast_name != \"Name\":\n return None\n ast = parse_program()\n assignments = ast.find_all(\"Assign\")\n cur_line_no = node.lineno\n all_assignments = []\n for assignment in assignments:\n if assignment.has(node):\n if assignment.lineno < cur_line_no:\n all_assignments.append(assignment)\n return all_assignments\n\n\ndef prevent_unused_result():\n ast = parse_program()\n exprs = ast.find_all('Expr')\n for expr in exprs:\n if expr.value.ast_name == \"Call\":\n a_call = expr.value\n if a_call.func.ast_name == 'Attribute':\n if a_call.func.attr == 'append':\n pass\n elif a_call.func.attr in ('replace', 'strip', 'lstrip', 'rstrip'):\n gently(\"Remember! You cannot modify a string directly. Instead, you should assign the result back \"\n \"to the string variable.
(str_mutate)\")\n\n\ndef prevent_builtin_usage(function_names):\n message = \"You cannot use the builtin function {}
.\"\n code = \"builtin_use\"\n label = \"Builtin Usage\"\n # Prevent direction calls\n ast = parse_program()\n all_calls = ast.find_all('Call')\n for a_call in all_calls:\n if a_call.func.ast_name == 'Name':\n if a_call.func.id in function_names:\n explain_r(message.format(a_call.func.id), code, label=label)\n return a_call.func.id\n return None\n\n\ndef find_negatives(root=None):\n if root is None:\n root = parse_program()\n return [-op.operand.n for op in root.find_all(\"UnaryOp\")\n if op.op.ast_name == \"USub\" and op.operand.ast_name == \"Num\"]\n\n\n# TODO: UGLY HACK. This is to avoid muted=False kwargs in the following\n# functions. Apparently skulpt doesn't support this syntax.\nmuted = False\n\n\ndef prevent_literal(*literals):\n \"\"\"\n Confirms that the literal is not in the code, returning False if it is not.\n \n Args:\n *literals (Any...): A series of literal values to look for.\n Returns:\n AstNode or False: If the literal is found in the code, then it is returned.\n \"\"\"\n message = \"Do not use the literal value {}
in your code.\"\n code = \"hard_code\"\n label = \"Hard Coding\"\n ast = parse_program()\n str_values = [s.s for s in ast.find_all(\"Str\")]\n num_values = [n.n for n in ast.find_all(\"Num\")]\n negative_values = find_negatives(ast)\n name_values = ([name.id for name in ast.find_all(\"Name\")]+\n [name.value for name in ast.find_all(\"NameConstant\")])\n for literal in literals:\n if isinstance(literal, (int, float)):\n if literal in num_values or literal in negative_values:\n if not muted:\n explain_r(message.format(repr(literal)), code, label=label)\n return literal\n elif isinstance(literal, str):\n if literal in str_values:\n if not muted:\n explain_r(message.format(repr(literal)), code, label=label)\n return literal\n elif literal in (True, False, None):\n if str(literal) in name_values:\n if not muted:\n explain_r(message.format(repr(literal)), code, label=label)\n return literal\n return False\n\n\ndef ensure_literal(*literals):\n \"\"\"\n Confirms that the literal IS in the code, returning False if it is not.\n \n Args:\n *literals (Any...): A series of literal values to look for.\n Returns:\n AstNode or False: If the literal is found in the code, then it is returned.\n \"\"\"\n message = \"You need the literal value {}
in your code.\"\n code = \"missing_literal\"\n label = \"Missing Literal\"\n ast = parse_program()\n str_values = [s.s for s in ast.find_all(\"Str\")]\n num_values = [n.n for n in ast.find_all(\"Num\")]\n negative_values = find_negatives(ast)\n name_values = ([str(name.id) for name in ast.find_all(\"Name\")]+\n [str(name.value) for name in ast.find_all(\"NameConstant\")])\n for literal in literals:\n if literal in (True, False, None):\n if str(literal) not in name_values:\n if not muted:\n explain_r(message.format(repr(literal)), code, label=label)\n return True\n elif isinstance(literal, (int, float)):\n if literal not in num_values and literal not in negative_values:\n if not muted:\n explain_r(message.format(repr(literal)), code, label=label)\n return literal\n elif isinstance(literal, str):\n if literal not in str_values:\n if not muted:\n explain_r(message.format(repr(literal)), code, label=label)\n return literal\n return False\n\n\ndef prevent_advanced_iteration():\n message = \"You should not use a while
loop to solve this problem.\"\n code = \"while_usage\"\n label = \"Usage of while
\"\n ast = parse_program()\n if ast.find_all('While'):\n explain_r(message, code, label=label)\n prevent_builtin_usage(['sum', 'map', 'filter', 'reduce', 'len', 'max', 'min',\n 'max', 'sorted', 'all', 'any', 'getattr', 'setattr',\n 'eval', 'exec', 'iter'])\n\n\nCOMPARE_OP_NAMES = {\n \"==\": \"Eq\",\n \"<\": \"Lt\",\n \"<=\": \"Lte\",\n \">=\": \"Gte\",\n \">\": \"Gt\",\n \"!=\": \"NotEq\",\n \"is\": \"Is\",\n \"is not\": \"IsNot\",\n \"in\": \"In\",\n \"not in\": \"NotIn\"}\nBOOL_OP_NAMES = {\n \"and\": \"And\",\n \"or\": \"Or\"}\nBIN_OP_NAMES = {\n \"+\": \"Add\",\n \"-\": \"Sub\",\n \"*\": \"Mult\",\n \"/\": \"Div\",\n \"//\": \"FloorDiv\",\n \"%\": \"Mod\",\n \"**\": \"Pow\",\n \">>\": \"LShift\",\n \"<<\": \"RShift\",\n \"|\": \"BitOr\",\n \"^\": \"BitXor\",\n \"&\": \"BitAnd\",\n \"@\": \"MatMult\"}\nUNARY_OP_NAMES = {\n # \"+\": \"UAdd\",\n # \"-\": \"USub\",\n \"not\": \"Not\",\n \"~\": \"Invert\"\n}\n\n\ndef ensure_operation(op_name, root=None):\n message = \"You are not using the {}
operator.\".format(op_name)\n code = \"missing_op\"\n label = \"Missing {}
Operator\".format(op_name)\n if root is None:\n root = parse_program()\n result = find_operation(op_name, root)\n if not result:\n gently_r(message, code, label)\n return result\n\n\ndef prevent_operation(op_name, root=None):\n message = \"You may not use the {}
operator.\".format(op_name)\n code = \"bad_op\"\n label = \"Bad Operator\".format(op_name)\n if root is None:\n root = parse_program()\n result = find_operation(op_name, root)\n if result:\n gently_r(message, code, label=label)\n return result\n\n\ndef find_operation(op_name, root):\n if op_name in COMPARE_OP_NAMES:\n compares = root.find_all(\"Compare\")\n for compare in compares:\n for op in compare.ops:\n if op.ast_name == COMPARE_OP_NAMES[op_name]:\n return compare\n elif op_name in BOOL_OP_NAMES:\n boolops = root.find_all(\"BoolOp\")\n for boolop in boolops:\n if boolop.op_name == BOOL_OP_NAMES[op_name]:\n return boolop\n elif op_name in BIN_OP_NAMES:\n binops = root.find_all(\"BinOp\")\n for binop in binops:\n if binop.op_name == BIN_OP_NAMES[op_name]:\n return binop\n elif op_name in UNARY_OP_NAMES:\n unaryops = root.find_all(\"UnaryOp\")\n for unaryop in unaryops:\n if unaryop.op_name == UNARY_OP_NAMES[op_name]:\n return unaryop\n return False\n\n\ndef ensure_recursion(function_name, root=None):\n if root is None:\n root = parse_program()\n all_calls = root.find_all('Call')\n calls = []\n for a_call in all_calls:\n if a_call.func.ast_name == 'Attribute':\n if a_call.func.attr == function_name:\n calls.append(a_call)\n elif a_call.func.ast_name == 'Name':\n if a_call.func.id == function_name:\n calls.append(a_call)\n return calls\n\n\ndef ensure_assignment(variable_name, type=None, value=None, root=None):\n \"\"\"\n Consumes a variable name\n TODO: Implement the value parameter\n\n :param variable_name: The variable name the student is expected to define.\n :type variable_name: str\n :param type: The string type of the node on the right side of the\n assignment. Check GreenTreeSnakes (e.g., \"Num\", or \"Str\").\n :type type: str\n :return: False or str\n \"\"\"\n if root is None:\n root = parse_program()\n assignments = root.find_all(\"Assign\")\n potentials = []\n for assign in assignments:\n if assign.targets[0].ast_name != \"Name\":\n continue\n if assign.targets[0].id == variable_name:\n potentials.append(assign)\n if type is None:\n return assign\n elif (type == 'Bool' and\n assign.value.ast_name == 'Name' and\n assign.value.id in ('True', 'False')):\n return assign\n elif (type == 'Bool' and\n assign.value.ast_name == 'NameConstant' and\n assign.value.value in (True, False)):\n return assign\n elif assign.value.ast_name == type:\n return assign\n if potentials and potentials[0].value.ast_name not in (\"Str\", \"Bool\", \"Num\", \"List\", \"Tuple\"):\n explain_r((\"You needed to assign a literal value to {variable}, but you \"\n \"created an expression instead.\").format(variable=variable_name), \"exp_vs_lit\",\n label=\"Expression Instead of Literal\")\n elif type is None:\n explain_r((\"You have not properly assigned anything to the variable \"\n \"{variable}.\").format(variable=variable_name), \"no_assign\", label=\"No Proper Assignment\")\n else:\n explain_r((\"You have not assigned a {type} to the variable {variable}.\"\n \"\").format(type=type, variable=variable_name), \"type_assign\", label=\"Unexpected Variable Type\")\n return False\n","src/lib/pedal/toolkit/__init__.py":"","src/lib/pedal/__init__.py":"\"\"\"\nA package for analyzing student code.\n\"\"\"\n\n# Probably want to import useful stuff from:\n# report\n# source\n# sandbox\n# tifa\n# cait\n# resolver\n# etc.\n\nfrom pedal.cait import (find_match, find_matches,\n parse_program,\n find_submatches, find_expr_sub_matches,\n def_use_error, data_state, data_type,\n expire_cait_cache)\nfrom pedal.report.imperative 
import (suppress, explain, compliment,\n give_partial, gently, set_success)\nfrom pedal.sandbox.sandbox import run, reset\nfrom pedal.tifa import tifa_analysis\nfrom pedal.source import (set_source, check_section_exists, next_section,\n set_source_file)\n","src/lib/pickle.py":"raise NotImplementedError(\"pickle is not yet implemented in Skulpt\")\n","src/lib/pickletools.py":"raise NotImplementedError(\"pickletools is not yet implemented in Skulpt\")\n","src/lib/PIL/__init__.js":"var $builtinmodule=function(){function a(a,b){this.lastResult=b,this.lastError=a}function b(a){return new Promise(function(b,c){if(void 0!==Sk.PIL.assets[a])b(Sk.PIL.assets[a]);else{var d=new Image;d.onload=function(){Sk.PIL.assets[a]=this,b(this)},d.onerror=function(){c(a)},d.src=a}})}var c;c={__name__:\"PIL\"},Sk.PIL||(Sk.PIL={assets:{}}),a.prototype.then=function(a){if(this.lastError)return this;try{this.lastResult=a(this.lastResult)}catch(a){this.lastResult=void 0,this.lastError=a}return this.lastResult instanceof Promise?this.lastResult:this},a.prototype.catch=function(a){if(this.lastError)try{this.lastResult=a(this.lastError),this.lastError=void 0}catch(a){this.lastResult=void 0,this.lastError=a}return this.lastResult instanceof Promise?this.lastResult:this};return c.Image=Sk.misceval.buildClass(c,function(a,c){c.__init__=new Sk.builtin.func(function(){}),c.open=new Sk.builtin.func(function(a,c){Sk.builtin.pyCheckArgs(\"open\",arguments,2,2),Sk.builtin.pyCheckType(\"file_or_url\",\"string\",Sk.builtin.checkString(c)),a.file_or_url=c;var d=b(Sk.ffi.remapToJs(c)),e=new Sk.misceval.Suspension;return a.image=Sk.builtin.none.none$,e.resume=function(){if(e.data.error)throw e.data.error;else return console.log(\"RESUMED\"),a.image},e.data={type:\"Sk.promise\",promise:d.then(function(b){return console.log(\"PROMISED\"),a.image=b,a.canvas=document.createElement(\"canvas\"),a.canvas.width=a.image.width,a.canvas.height=a.image.height,console.log(a.image),console.log(a.image.width,a.image.height),a.canvas.getContext(\"2d\").drawImage(a.image,0,0,a.image.width,a.image.height),a.pixels=a.canvas.getContext(\"2d\").getImageData(0,0,a.image.width,a.image.height).data,console.log(a.pixels),b},function(b){throw a.image=\"\",b})},e}),c.show=new Sk.builtin.func(function(a){if(void 0===Sk.console)throw new Sk.builtin.NameError(\"Can not resolve drawing area. 
Sk.console is undefined!\");var b={image:a.image,file_or_url:a.file_or_url};Sk.console.printPILImage(b)})},\"Image\",[]),c};","src/lib/pipes.py":"raise NotImplementedError(\"pipes is not yet implemented in Skulpt\")\n","src/lib/pkgutil.py":"raise NotImplementedError(\"pkgutil is not yet implemented in Skulpt\")\n","src/lib/platform.js":"var $builtinmodule=function(){var a={},b=\"undefined\"!=typeof window&&\"undefined\"!=typeof window.navigator;return a.python_implementation=new Sk.builtin.func(function(){return Sk.builtin.pyCheckArgsLen(\"python_implementation\",arguments.length,0,0),new Sk.builtin.str(\"Skulpt\")}),a.node=new Sk.builtin.func(function(){return Sk.builtin.pyCheckArgsLen(\"node\",arguments.length,0,0),new Sk.builtin.str(\"\")}),a.version=new Sk.builtin.func(function(){return Sk.builtin.pyCheckArgsLen(\"version\",arguments.length,0,0),new Sk.builtin.str(\"\")}),a.python_version=new Sk.builtin.func(function(){var a;return Sk.builtin.pyCheckArgsLen(\"python_version\",arguments.length,0,0),a=Sk.__future__.python_version?\"3.2.0\":\"2.7.0\",new Sk.builtin.str(a)}),a.system=new Sk.builtin.func(function(){var a;return Sk.builtin.pyCheckArgsLen(\"system\",arguments.length,0,0),a=b?window.navigator.appCodeName:\"\",new Sk.builtin.str(a)}),a.machine=new Sk.builtin.func(function(){var a;return Sk.builtin.pyCheckArgsLen(\"machine\",arguments.length,0,0),a=b?window.navigator.platform:\"\",new Sk.builtin.str(a)}),a.release=new Sk.builtin.func(function(){var a;return Sk.builtin.pyCheckArgsLen(\"release\",arguments.length,0,0),a=b?window.navigator.appVersion:\"\",new Sk.builtin.str(a)}),a.architecture=new Sk.builtin.func(function(){return Sk.builtin.pyCheckArgsLen(\"architecture\",arguments.length,0,0),new Sk.builtin.tuple([new Sk.builtin.str(\"64bit\"),new Sk.builtin.str(\"\")])}),a.processor=new Sk.builtin.func(function(){return Sk.builtin.pyCheckArgsLen(\"processor\",arguments.length,0,0),new Sk.builtin.str(\"\")}),a};","src/lib/platform.py":"raise NotImplementedError(\"platform is not yet implemented in Skulpt\")\n","src/lib/plistlib.py":"raise NotImplementedError(\"plistlib is not yet implemented in Skulpt\")\n","src/lib/popen2.py":"raise NotImplementedError(\"popen2 is not yet implemented in Skulpt\")\n","src/lib/poplib.py":"raise NotImplementedError(\"poplib is not yet implemented in Skulpt\")\n","src/lib/posixfile.py":"raise NotImplementedError(\"posixfile is not yet implemented in Skulpt\")\n","src/lib/posixpath.py":"\"\"\"Common operations on Posix pathnames.\n\nInstead of importing this module directly, import os and refer to\nthis module as os.path. The \"os.path\" name is an alias for this\nmodule on Posix systems; on other systems (e.g. Windows),\nos.path provides the same operations in a manner specific to that\nplatform, and is an alias to another module (e.g. 
ntpath).\n\nSome of this can actually be useful on non-Posix systems too, e.g.\nfor manipulation of the pathname component of URLs.\n\n# acbart 7/8/2019: Changed all b'' strings to regular strings.\n\"\"\"\n\n# Strings representing various path-related bits and pieces.\n# These are primarily for export; internally, they are hardcoded.\n# Should be set before imports for resolving cyclic dependency.\ncurdir = '.'\npardir = '..'\nextsep = '.'\nsep = '/'\npathsep = ':'\ndefpath = '/bin:/usr/bin'\naltsep = None\ndevnull = '/dev/null'\n\nimport os\nimport sys\nimport stat\nimport genericpath\nfrom genericpath import *\n\n__all__ = [\"normcase\",\"isabs\",\"join\",\"splitdrive\",\"split\",\"splitext\",\n \"basename\",\"dirname\",\"commonprefix\",\"getsize\",\"getmtime\",\n \"getatime\",\"getctime\",\"islink\",\"exists\",\"lexists\",\"isdir\",\"isfile\",\n \"ismount\", \"expanduser\",\"expandvars\",\"normpath\",\"abspath\",\n \"samefile\",\"sameopenfile\",\"samestat\",\n \"curdir\",\"pardir\",\"sep\",\"pathsep\",\"defpath\",\"altsep\",\"extsep\",\n \"devnull\",\"realpath\",\"supports_unicode_filenames\",\"relpath\",\n \"commonpath\"]\n\n\ndef fspath(val):\n return val\n\n\nos.getcwd = lambda: \"\"\n\n\ndef _get_sep(path):\n if isinstance(path, str):\n return '/' #'/'\n else:\n return '/'\n\n# Normalize the case of a pathname. Trivial in Posix, string.lower on Mac.\n# On MS-DOS this may also turn slashes into backslashes; however, other\n# normalizations (such as optimizing '../' away) are not allowed\n# (another function should be defined to do that).\n\ndef normcase(s):\n \"\"\"Normalize case of pathname. Has no effect under Posix\"\"\"\n return fspath(s)\n\n\n# Return whether a path is absolute.\n# Trivial in Posix, harder on the Mac or MS-DOS.\n\ndef isabs(s):\n \"\"\"Test whether a path is absolute\"\"\"\n s = fspath(s)\n sep = _get_sep(s)\n return s.startswith(sep)\n\n\n# Join pathnames.\n# Ignore the previous parts if a part is absolute.\n# Insert a '/' unless the first part is empty or already ends in '/'.\n\ndef join(a, *p):\n \"\"\"Join two or more pathname components, inserting '/' as needed.\n If any component is an absolute path, all previous path components\n will be discarded. An empty last part will result in a path that\n ends with a separator.\"\"\"\n a = fspath(a)\n sep = _get_sep(a)\n path = a\n try:\n if not p:\n path[:0] + sep #23780: Ensure compatible data type even if p is null.\n for b in map(fspath, p):\n if b.startswith(sep):\n path = b\n elif not path or path.endswith(sep):\n path += b\n else:\n path += sep + b\n except (TypeError, AttributeError, BytesWarning):\n genericpath._check_arg_types('join', a, *p)\n raise\n return path\n\n\n# Split a path in head (everything up to the last '/') and tail (the\n# rest). If the path ends in '/', tail will be empty. If there is no\n# '/' in the path, head will be empty.\n# Trailing '/'es are stripped from head unless it is the root.\n\ndef split(p):\n \"\"\"Split a pathname. Returns tuple \"(head, tail)\" where \"tail\" is\n everything after the final slash. 
Either part may be empty.\"\"\"\n p = fspath(p)\n sep = _get_sep(p)\n i = p.rfind(sep) + 1\n head, tail = p[:i], p[i:]\n if head and head != sep*len(head):\n head = head.rstrip(sep)\n return head, tail\n\n\n# Split a path in root and extension.\n# The extension is everything starting at the last dot in the last\n# pathname component; the root is everything before that.\n# It is always true that root + ext == p.\n\ndef splitext(p):\n p = fspath(p)\n if isinstance(p, str):\n sep = '/'\n extsep = '.'\n else:\n sep = '/'\n extsep = '.'\n return genericpath._splitext(p, sep, None, extsep)\n#splitext.__doc__ = genericpath._splitext.__doc__\n\n# Split a pathname into a drive specification and the rest of the\n# path. Useful on DOS/Windows/NT; on Unix, the drive is always empty.\n\ndef splitdrive(p):\n \"\"\"Split a pathname into drive and path. On Posix, drive is always\n empty.\"\"\"\n p = fspath(p)\n return p[:0], p\n\n\n# Return the tail (basename) part of a path, same as split(path)[1].\n\ndef basename(p):\n \"\"\"Returns the final component of a pathname\"\"\"\n p = fspath(p)\n sep = _get_sep(p)\n i = p.rfind(sep) + 1\n return p[i:]\n\n\n# Return the head (dirname) part of a path, same as split(path)[0].\n\ndef dirname(p):\n \"\"\"Returns the directory component of a pathname\"\"\"\n p = fspath(p)\n sep = _get_sep(p)\n i = p.rfind(sep) + 1\n head = p[:i]\n if head and head != sep*len(head):\n head = head.rstrip(sep)\n return head\n\n\n# Is a path a symbolic link?\n# This will always return false on systems where os.lstat doesn't exist.\n\ndef islink(path):\n \"\"\"Test whether a path is a symbolic link\"\"\"\n try:\n st = os.lstat(path)\n except (OSError, ValueError, AttributeError):\n return False\n return stat.S_ISLNK(st.st_mode)\n\n# Being true for dangling symbolic links is also useful.\n\ndef lexists(path):\n \"\"\"Test whether a path exists. Returns True for broken symbolic links\"\"\"\n try:\n os.lstat(path)\n except (OSError, ValueError):\n return False\n return True\n\n\n# Is a path a mount point?\n# (Does this work for all UNIXes? Is it even guaranteed to work by Posix?)\n\ndef ismount(path):\n \"\"\"Test whether a path is a mount point\"\"\"\n try:\n s1 = os.lstat(path)\n except (OSError, ValueError):\n # It doesn't exist -- so not a mount point. :-)\n return False\n else:\n # A symlink can never be a mount point\n if stat.S_ISLNK(s1.st_mode):\n return False\n\n if isinstance(path, str):\n parent = join(path, '..')\n else:\n parent = join(path, '..')\n parent = realpath(parent)\n try:\n s2 = os.lstat(parent)\n except (OSError, ValueError):\n return False\n\n dev1 = s1.st_dev\n dev2 = s2.st_dev\n if dev1 != dev2:\n return True # path/.. on a different device as path\n ino1 = s1.st_ino\n ino2 = s2.st_ino\n if ino1 == ino2:\n return True # path/.. is the same i-node as path\n return False\n\n\n# Expand paths beginning with '~' or '~user'.\n# '~' means $HOME; '~user' means that user's home directory.\n# If the path doesn't begin with '~', or if the user or $HOME is unknown,\n# the path is returned unchanged (leaving error reporting to whatever\n# function is called with the expanded path as argument).\n# See also module 'glob' for expansion of *, ? and [...] in pathnames.\n# (A function should also be defined to do full *sh-style environment\n# variable expansion.)\n\ndef expanduser(path):\n \"\"\"Expand ~ and ~user constructions. 
If user or $HOME is unknown,\n do nothing.\"\"\"\n path = fspath(path)\n if isinstance(path, str):\n tilde = '~'\n else:\n tilde = '~'\n if not path.startswith(tilde):\n return path\n sep = _get_sep(path)\n i = path.find(sep, 1)\n if i < 0:\n i = len(path)\n if i == 1:\n if 'HOME' not in os.environ:\n import pwd\n try:\n userhome = pwd.getpwuid(os.getuid()).pw_dir\n except KeyError:\n # bpo-10496: if the current user identifier doesn't exist in the\n # password database, return the path unchanged\n return path\n else:\n userhome = os.environ['HOME']\n else:\n import pwd\n name = path[1:i]\n if isinstance(name, str):\n name = str(name, 'ASCII')\n try:\n pwent = pwd.getpwnam(name)\n except KeyError:\n # bpo-10496: if the user name from the path doesn't exist in the\n # password database, return the path unchanged\n return path\n userhome = pwent.pw_dir\n if isinstance(path, str):\n userhome = os.fsencode(userhome)\n root = '/'\n else:\n root = '/'\n userhome = userhome.rstrip(root)\n return (userhome + path[i:]) or root\n\n\n# Expand paths containing shell variable substitutions.\n# This expands the forms $variable and ${variable} only.\n# Non-existent variables are left unchanged.\n\n_varprog = None\n_varprogb = None\n\ndef expandvars(path):\n \"\"\"Expand shell variables of form $var and ${var}. Unknown variables\n are left unchanged.\"\"\"\n path = fspath(path)\n global _varprog, _varprogb\n if isinstance(path, str):\n if '$' not in path:\n return path\n if not _varprogb:\n import re\n _varprogb = re.compile(r'\\$(\\w+|\\{[^}]*\\})', re.ASCII)\n search = _varprogb.search\n start = '{'\n end = '}'\n environ = getattr(os, 'environ', None)\n else:\n if '$' not in path:\n return path\n if not _varprog:\n import re\n _varprog = re.compile(r'\\$(\\w+|\\{[^}]*\\})', re.ASCII)\n search = _varprog.search\n start = '{'\n end = '}'\n environ = os.environ\n i = 0\n while True:\n m = search(path, i)\n if not m:\n break\n i, j = m.span(0)\n name = m.group(1)\n if name.startswith(start) and name.endswith(end):\n name = name[1:-1]\n try:\n if environ is None:\n value = os.fsencode(os.environ[os.fsdecode(name)])\n else:\n value = environ[name]\n except KeyError:\n i = j\n else:\n tail = path[j:]\n path = path[:i] + value\n i = len(path)\n path += tail\n return path\n\n\n# Normalize a path, e.g. 
A//B, A/./B and A/foo/../B all become A/B.\n# It should be understood that this may change the meaning of the path\n# if it contains symbolic links!\n\ndef normpath(path):\n \"\"\"Normalize path, eliminating double slashes, etc.\"\"\"\n path = fspath(path)\n if isinstance(path, str):\n sep = '/'\n empty = ''\n dot = '.'\n dotdot = '..'\n else:\n sep = '/'\n empty = ''\n dot = '.'\n dotdot = '..'\n if path == empty:\n return dot\n initial_slashes = path.startswith(sep)\n # POSIX allows one or two initial slashes, but treats three or more\n # as single slash.\n if (initial_slashes and\n path.startswith(sep*2) and not path.startswith(sep*3)):\n initial_slashes = 2\n comps = path.split(sep)\n new_comps = []\n for comp in comps:\n if comp in (empty, dot):\n continue\n if (comp != dotdot or (not initial_slashes and not new_comps) or\n (new_comps and new_comps[-1] == dotdot)):\n new_comps.append(comp)\n elif new_comps:\n new_comps.pop()\n comps = new_comps\n path = sep.join(comps)\n if initial_slashes:\n path = sep*initial_slashes + path\n return path or dot\n\n\ndef abspath(path):\n \"\"\"Return an absolute path.\"\"\"\n path = fspath(path)\n if not isabs(path):\n cwd = os.getcwd()\n path = join(cwd, path)\n return normpath(path)\n\n\n# Return a canonical path (i.e. the absolute location of a file on the\n# filesystem).\n\ndef realpath(filename):\n \"\"\"Return the canonical path of the specified filename, eliminating any\nsymbolic links encountered in the path.\"\"\"\n filename = fspath(filename)\n path, ok = _joinrealpath(filename[:0], filename, {})\n return abspath(path)\n\n# Join two paths, normalizing and eliminating any symbolic links\n# encountered in the second path.\ndef _joinrealpath(path, rest, seen):\n if isinstance(path, str):\n sep = '/'\n curdir = '.'\n pardir = '..'\n else:\n sep = '/'\n curdir = '.'\n pardir = '..'\n\n if isabs(rest):\n rest = rest[1:]\n path = sep\n\n while rest:\n name, _, rest = rest.partition(sep)\n if not name or name == curdir:\n # current dir\n continue\n if name == pardir:\n # parent dir\n if path:\n path, name = split(path)\n if name == pardir:\n path = join(path, pardir, pardir)\n else:\n path = pardir\n continue\n newpath = join(path, name)\n if not islink(newpath):\n path = newpath\n continue\n # Resolve the symbolic link\n if newpath in seen:\n # Already seen this path\n path = seen[newpath]\n if path is not None:\n # use cached value\n continue\n # The symlink is not resolved, so we must have a symlink loop.\n # Return already resolved part + rest of the path unchanged.\n return join(newpath, rest), False\n seen[newpath] = None # not resolved symlink\n path, ok = _joinrealpath(path, os.readlink(newpath), seen)\n if not ok:\n return join(path, rest), False\n seen[newpath] = path # resolved symlink\n\n return path, True\n\n\nsupports_unicode_filenames = (sys.platform == 'darwin')\n\ndef relpath(path, start=None):\n \"\"\"Return a relative version of a path\"\"\"\n\n if not path:\n raise ValueError(\"no path specified\")\n\n path = fspath(path)\n if isinstance(path, str):\n curdir = '.'\n sep = '/'\n pardir = '..'\n else:\n curdir = '.'\n sep = '/'\n pardir = '..'\n\n if start is None:\n start = curdir\n else:\n start = fspath(start)\n\n try:\n start_list = [x for x in abspath(start).split(sep) if x]\n path_list = [x for x in abspath(path).split(sep) if x]\n # Work out how much of the filepath is shared by start and path.\n i = len(commonprefix([start_list, path_list]))\n\n rel_list = [pardir] * (len(start_list)-i) + path_list[i:]\n if not rel_list:\n 
return curdir\n return join(*rel_list)\n except (TypeError, AttributeError, BytesWarning, DeprecationWarning):\n genericpath._check_arg_types('relpath', path, start)\n raise\n\n\n# Return the longest common sub-path of the sequence of paths given as input.\n# The paths are not normalized before comparing them (this is the\n# responsibility of the caller). Any trailing separator is stripped from the\n# returned path.\n\ndef commonpath(paths):\n \"\"\"Given a sequence of path names, returns the longest common sub-path.\"\"\"\n\n if not paths:\n raise ValueError('commonpath() arg is an empty sequence')\n\n paths = tuple(map(fspath, paths))\n if isinstance(paths[0], str):\n sep = '/'\n curdir = '.'\n else:\n sep = '/'\n curdir = '.'\n\n try:\n split_paths = [path.split(sep) for path in paths]\n\n try:\n isabs, = set(p[:1] == sep for p in paths)\n except ValueError:\n raise ValueError(\"Can't mix absolute and relative paths\") from None\n\n split_paths = [[c for c in s if c and c != curdir] for s in split_paths]\n s1 = min(split_paths)\n s2 = max(split_paths)\n common = s1\n for i, c in enumerate(s1):\n if c != s2[i]:\n common = s1[:i]\n break\n\n prefix = sep if isabs else sep[:0]\n return prefix + sep.join(common)\n except (TypeError, AttributeError):\n genericpath._check_arg_types('commonpath', *paths)\n raise","src/lib/pprint.py":"import json\n\ndef pprint(obj, indent=1):\n print(json.dumps(obj))","src/lib/processing.js":"var $builtinmodule=function(){var b,c,d,e,f,g,h,a=Math.PI,j={},k=[],l=!0,m=null;return j.processing=null,j.p=null,j.X=new Sk.builtin.int_(0),j.Y=new Sk.builtin.int_(1),j.Z=new Sk.builtin.int_(2),j.R=new Sk.builtin.int_(3),j.G=new Sk.builtin.int_(4),j.B=new Sk.builtin.int_(5),j.A=new Sk.builtin.int_(6),j.U=new Sk.builtin.int_(7),j.V=new Sk.builtin.int_(8),j.NX=new Sk.builtin.int_(9),j.NY=new Sk.builtin.int_(10),j.NZ=new Sk.builtin.int_(11),j.EDGE=new Sk.builtin.int_(12),j.SR=new Sk.builtin.int_(13),j.SG=new Sk.builtin.int_(14),j.SB=new Sk.builtin.int_(15),j.SA=new Sk.builtin.int_(16),j.SW=new Sk.builtin.int_(17),j.TX=new Sk.builtin.int_(18),j.TY=new Sk.builtin.int_(19),j.TZ=new Sk.builtin.int_(20),j.VX=new Sk.builtin.int_(21),j.VY=new Sk.builtin.int_(22),j.VZ=new Sk.builtin.int_(23),j.VW=new Sk.builtin.int_(24),j.AR=new Sk.builtin.int_(25),j.AG=new Sk.builtin.int_(26),j.AB=new Sk.builtin.int_(27),j.DR=new Sk.builtin.int_(3),j.DG=new Sk.builtin.int_(4),j.DB=new Sk.builtin.int_(5),j.DA=new Sk.builtin.int_(6),j.SPR=new Sk.builtin.int_(28),j.SPG=new Sk.builtin.int_(29),j.SPB=new Sk.builtin.int_(30),j.SHINE=new Sk.builtin.int_(31),j.ER=new Sk.builtin.int_(32),j.EG=new Sk.builtin.int_(33),j.EB=new Sk.builtin.int_(34),j.BEEN_LIT=new Sk.builtin.int_(35),j.VERTEX_FIELD_COUNT=new Sk.builtin.int_(36),j.CENTER=new Sk.builtin.int_(3),j.RADIUS=new Sk.builtin.int_(2),j.CORNERS=new Sk.builtin.int_(1),j.CORNER=new Sk.builtin.int_(0),j.DIAMETER=new Sk.builtin.int_(3),j.BASELINE=new Sk.builtin.int_(0),j.TOP=new Sk.builtin.int_(101),j.BOTTOM=new Sk.builtin.int_(102),j.NORMAL=new Sk.builtin.int_(1),j.NORMALIZED=new Sk.builtin.int_(1),j.IMAGE=new Sk.builtin.int_(2),j.MODEL=new Sk.builtin.int_(4),j.SHAPE=new Sk.builtin.int_(5),j.AMBIENT=new Sk.builtin.int_(0),j.DIRECTIONAL=new Sk.builtin.int_(1),j.SPOT=new Sk.builtin.int_(3),j.RGB=new Sk.builtin.int_(1),j.ARGB=new Sk.builtin.int_(2),j.HSB=new Sk.builtin.int_(3),j.ALPHA=new Sk.builtin.int_(4),j.CMYK=new Sk.builtin.int_(5),j.TIFF=new Sk.builtin.int_(0),j.TARGA=new Sk.builtin.int_(1),j.JPEG=new Sk.builtin.int_(2),j.GIF=new 
Sk.builtin.int_(3),j.MITER=new Sk.builtin.str(\"miter\"),j.BEVEL=new Sk.builtin.str(\"bevel\"),j.ROUND=new Sk.builtin.str(\"round\"),j.SQUARE=new Sk.builtin.str(\"butt\"),j.PROJECT=new Sk.builtin.str(\"square\"),j.P2D=new Sk.builtin.int_(1),j.JAVA2D=new Sk.builtin.int_(1),j.WEBGL=new Sk.builtin.int_(2),j.P3D=new Sk.builtin.int_(2),j.OPENGL=new Sk.builtin.int_(2),j.PDF=new Sk.builtin.int_(0),j.DXF=new Sk.builtin.int_(0),j.OTHER=new Sk.builtin.int_(0),j.WINDOWS=new Sk.builtin.int_(1),j.MAXOSX=new Sk.builtin.int_(2),j.LINUX=new Sk.builtin.int_(3),j.EPSILON=new Sk.builtin.float_(1e-4),j.MAX_FLOAT=new Sk.builtin.float_(34028235e31),j.MIN_FLOAT=new Sk.builtin.float_(-34028235e31),j.MAX_INT=new Sk.builtin.int_(2147483647),j.MIN_INT=new Sk.builtin.int_(-2147483648),j.HALF_PI=new Sk.builtin.float_(a/2),j.THIRD_PI=new Sk.builtin.float_(a/3),j.PI=new Sk.builtin.float_(a),j.TWO_PI=new Sk.builtin.float_(2*a),j.TAU=new Sk.builtin.float_(2*a),j.QUARTER_PI=new Sk.builtin.float_(a/4),j.DEG_TO_RAD=new Sk.builtin.float_(a/180),j.RAD_TO_DEG=new Sk.builtin.float_(180/a),j.WHITESPACE=Sk.builtin.str(\" \\t\\n\\r\\f\\xA0\"),j.POINT=new Sk.builtin.int_(2),j.POINTS=new Sk.builtin.int_(2),j.LINE=new Sk.builtin.int_(4),j.LINES=new Sk.builtin.int_(4),j.TRIANGLE=new Sk.builtin.int_(8),j.TRIANGLES=new Sk.builtin.int_(9),j.TRIANGLE_FAN=new Sk.builtin.int_(11),j.TRIANGLE_STRIP=new Sk.builtin.int_(10),j.QUAD=new Sk.builtin.int_(16),j.QUADS=new Sk.builtin.int_(16),j.QUAD_STRIP=new Sk.builtin.int_(17),j.POLYGON=new Sk.builtin.int_(20),j.PATH=new Sk.builtin.int_(21),j.RECT=new Sk.builtin.int_(30),j.ELLIPSE=new Sk.builtin.int_(31),j.ARC=new Sk.builtin.int_(32),j.SPHERE=new Sk.builtin.int_(40),j.BOX=new Sk.builtin.int_(41),j.GROUP=new Sk.builtin.int_(0),j.PRIMITIVE=new Sk.builtin.int_(1),j.GEOMETRY=new Sk.builtin.int_(3),j.VERTEX=new Sk.builtin.int_(0),j.BEZIER_VERTEX=new Sk.builtin.int_(1),j.CURVE_VERTEX=new Sk.builtin.int_(2),j.BREAK=new Sk.builtin.int_(3),j.CLOSESHAPE=new Sk.builtin.int_(4),j.REPLACE=new Sk.builtin.int_(0),j.BLEND=new Sk.builtin.int_(1),j.ADD=new Sk.builtin.int_(2),j.SUBTRACT=new Sk.builtin.int_(4),j.LIGHTEST=new Sk.builtin.int_(8),j.DARKEST=new Sk.builtin.int_(16),j.DIFFERENCE=new Sk.builtin.int_(32),j.EXCLUSION=new Sk.builtin.int_(64),j.MULTIPLY=new Sk.builtin.int_(128),j.SCREEN=new Sk.builtin.int_(256),j.OVERLAY=new Sk.builtin.int_(512),j.HARD_LIGHT=new Sk.builtin.int_(1024),j.SOFT_LIGHT=new Sk.builtin.int_(2048),j.DODGE=new Sk.builtin.int_(4096),j.BURN=new Sk.builtin.int_(8192),j.ALPHA_MASK=new Sk.builtin.int_(4278190080),j.RED_MASK=new Sk.builtin.int_(16711680),j.GREEN_MASK=new Sk.builtin.int_(65280),j.BLUE_MASK=new Sk.builtin.int_(255),j.CUSTOM=new Sk.builtin.int_(0),j.ORTHOGRAPHIC=new Sk.builtin.int_(2),j.PERSPECTIVE=new Sk.builtin.int_(3),j.ARROW=new Sk.builtin.str(\"default\"),j.CROSS=new Sk.builtin.str(\"crosshair\"),j.HAND=new Sk.builtin.str(\"pointer\"),j.MOVE=new Sk.builtin.str(\"move\"),j.TEXT=new Sk.builtin.str(\"text\"),j.WAIT=new Sk.builtin.str(\"wait\"),j.NOCURSOR=Sk.builtin.assk$(\"url('data:image/gif;base64,R0lGODlhAQABAIAAAP///wAAACH5BAEAAAAALAAAAAABAAEAAAICRAEAOw=='), auto\",Sk.builtin.int_.str),j.DISABLE_OPENGL_2X_SMOOTH=new Sk.builtin.int_(1),j.ENABLE_OPENGL_2X_SMOOTH=new Sk.builtin.int_(-1),j.ENABLE_OPENGL_4X_SMOOTH=new Sk.builtin.int_(2),j.ENABLE_NATIVE_FONTS=new Sk.builtin.int_(3),j.DISABLE_DEPTH_TEST=new Sk.builtin.int_(4),j.ENABLE_DEPTH_TEST=new Sk.builtin.int_(-4),j.ENABLE_DEPTH_SORT=new Sk.builtin.int_(5),j.DISABLE_DEPTH_SORT=new 
Sk.builtin.int_(-5),j.DISABLE_OPENGL_ERROR_REPORT=new Sk.builtin.int_(6),j.ENABLE_OPENGL_ERROR_REPORT=new Sk.builtin.int_(-6),j.ENABLE_ACCURATE_TEXTURES=new Sk.builtin.int_(7),j.DISABLE_ACCURATE_TEXTURES=new Sk.builtin.int_(-7),j.HINT_COUNT=new Sk.builtin.int_(10),j.OPEN=new Sk.builtin.int_(1),j.CLOSE=new Sk.builtin.int_(2),j.BLUR=new Sk.builtin.int_(11),j.GRAY=new Sk.builtin.int_(12),j.INVERT=new Sk.builtin.int_(13),j.OPAQUE=new Sk.builtin.int_(14),j.POSTERIZE=new Sk.builtin.int_(15),j.THRESHOLD=new Sk.builtin.int_(16),j.ERODE=new Sk.builtin.int_(17),j.DILATE=new Sk.builtin.int_(18),j.BACKSPACE=new Sk.builtin.int_(8),j.TAB=new Sk.builtin.int_(9),j.ENTER=new Sk.builtin.int_(10),j.RETURN=new Sk.builtin.int_(13),j.ESC=new Sk.builtin.int_(27),j.DELETE=new Sk.builtin.int_(127),j.CODED=new Sk.builtin.int_(65535),j.SHIFT=new Sk.builtin.int_(16),j.CONTROL=new Sk.builtin.int_(17),j.ALT=new Sk.builtin.int_(18),j.CAPSLK=new Sk.builtin.int_(20),j.PGUP=new Sk.builtin.int_(33),j.PGDN=new Sk.builtin.int_(34),j.END=new Sk.builtin.int_(35),j.HOME=new Sk.builtin.int_(36),j.LEFT=new Sk.builtin.int_(37),j.UP=new Sk.builtin.int_(38),j.RIGHT=new Sk.builtin.int_(39),j.DOWN=new Sk.builtin.int_(40),j.F1=new Sk.builtin.int_(112),j.F2=new Sk.builtin.int_(113),j.F3=new Sk.builtin.int_(114),j.F4=new Sk.builtin.int_(115),j.F5=new Sk.builtin.int_(116),j.F6=new Sk.builtin.int_(117),j.F7=new Sk.builtin.int_(118),j.F8=new Sk.builtin.int_(119),j.F9=new Sk.builtin.int_(120),j.F10=new Sk.builtin.int_(121),j.F11=new Sk.builtin.int_(122),j.F12=new Sk.builtin.int_(123),j.NUMLK=new Sk.builtin.int_(144),j.META=new Sk.builtin.int_(157),j.INSERT=new Sk.builtin.int_(155),j.SINCOS_LENGTH=new Sk.builtin.int_(720),j.PRECISIONB=new Sk.builtin.int_(15),j.PRECISIONF=new Sk.builtin.int_(32768),j.PREC_MAXVAL=new Sk.builtin.int_(32767),j.PREC_ALPHA_SHIFT=new Sk.builtin.int_(9),j.PREC_RED_SHIFT=new Sk.builtin.int_(1),j.NORMAL_MODE_AUTO=new Sk.builtin.int_(0),j.NORMAL_MODE_SHAPE=new Sk.builtin.int_(1),j.NORMAL_MODE_VERTEX=new Sk.builtin.int_(2),j.MAX_LIGHTS=new Sk.builtin.int_(8),j.line=new Sk.builtin.func(function(a,b,c,d){j.processing.line(a.v,b.v,c.v,d.v)}),j.ellipse=new Sk.builtin.func(function(a,b,c,d){j.processing.ellipse(a.v,b.v,c.v,d.v)}),j.text=new Sk.builtin.func(function(a,b,c){j.processing.text(a.v,b.v,c.v)}),j.point=new Sk.builtin.func(function(a,b){j.processing.point(a.v,b.v)}),j.arc=new Sk.builtin.func(function(a,b,c,d,e,f){j.processing.arc(a.v,b.v,c.v,d.v,e.v,f.v)}),j.quad=new Sk.builtin.func(function(a,b,c,d,e,f,g,h){j.processing.quad(a.v,b.v,c.v,d.v,e.v,f.v,g.v,h.v)}),j.rect=new Sk.builtin.func(function(a,b,c,d,e){\"undefined\"==typeof e?j.processing.rect(a.v,b.v,c.v,d.v):j.processing.rect(a.v,b.v,c.v,d.v,e.v)}),j.triangle=new Sk.builtin.func(function(a,b,c,d,e,f){j.processing.triangle(a.v,b.v,c.v,d.v,e.v,f.v)}),j.bezier=new Sk.builtin.func(function(a,b,c,d,e,f,g,h,i,k,l,m){\"undefined\"==typeof i?j.processing.bezier(a.v,b.v,c.v,d.v,e.v,f.v,g.v,h.v):j.processing.bezier(a.v,b.v,c.v,d.v,e.v,f.v,g.v,h.v,i.v,k.v,l.v,m.v)}),j.alpha=new Sk.builtin.func(function(a,c,d){return\"undefined\"==typeof c?new Sk.builtin.float_(j.processing.alpha(a.v)):\"undefined\"==typeof d?new Sk.builtin.float_(j.processing.alpha(a.v,c.v)):new Sk.builtin.float_(j.processing.alpha(a.v,c.v,d.v))}),j.ambient=new Sk.builtin.func(function(a,c,d){\"undefined\"==typeof c?j.processing.ambient(a.v):\"undefined\"==typeof d?j.processing.ambient(a.v,c.v):j.processing.ambient(a.v,c.v,d.v)}),j.ambientLight=new 
Sk.builtin.func(function(a,b,c,d,e,f){\"undefined\"==typeof d?j.processing.ambientLight(a.v,b.v,c.v):\"undefined\"==typeof e?j.processing.ambientLight(a.v,b.v,c.v,d.v):\"undefined\"==typeof f?j.processing.ambientLight(a.v,b.v,c.v,d.v,e.v):j.processing.ambientLight(a.v,b.v,c.v,d.v,e.v,f.v)}),j.beginCamera=new Sk.builtin.func(function(){j.processing.beginCamera()}),j.beginShape=new Sk.builtin.func(function(a){\"undefined\"==typeof a&&(a=j.POLYGON),j.processing.beginShape(a.v)}),j.bezierDetail=new Sk.builtin.func(function(a){a=\"undefined\"==typeof a?20:a.v,j.processing.bezierDetail(a)}),j.bezierPoint=new Sk.builtin.func(function(e,a,b,c,d){j.processing.bezierPoint(e.v,a.v,b.v,c.v,d.v)}),j.bezierTangent=new Sk.builtin.func(function(e,a,b,c,d){j.processing.bezierTangent(e.v,a.v,b.v,c.v,d.v)}),j.bezierVertex=new Sk.builtin.func(function(a,b,c,d,e,f,g,h,i){\"undefined\"==typeof g?j.processing.bezierVertex(a.v,b.v,c.v,d.v,e.v,f.v):\"undefined\"==typeof h?j.processing.bezierVertex(a.v,b.v,c.v,d.v,e.v,f.v,g.v):\"undefined\"==typeof i?j.processing.bezierVertex(a.v,b.v,c.v,d.v,e.v,f.v,g.v,h.v):j.processing.bezierVertex(a.v,b.v,c.v,d.v,e.v,f.v,g.v,h.v,i.v)}),j.blend=new Sk.builtin.func(function(a,b,c,d,e,f,g,h,i,k){other instanceof Sk.builtin.int_||other instanceof Sk.builtin.float_?j.processing.blend(a.v,b.v,c.v,d.v,e.v,f.v,g.v,h.v,i.v):j.processing.blend(a.v,b.v,c.v,d.v,e.v,f.v,g.v,h.v,i.v,k.v)}),j.blendColor=new Sk.builtin.func(function(a,b,d){var e=Sk.misceval.callsimArray(j.color,[new Sk.builtin.int_(0),new Sk.builtin.int_(0),new Sk.builtin.int_(0)]);return e.v=j.processing.blendColor(a.v,b.v,d.v),e}),j.brightness=new Sk.builtin.func(function(a,c,d){return\"undefined\"==typeof c?new Sk.builtin.float_(j.processing.brightness(a.v)):\"undefined\"==typeof d?new Sk.builtin.float_(j.processing.brightness(a.v,c.v)):new Sk.builtin.float_(j.processing.brightness(a.v,c.v,d.v))}),j.camera=new Sk.builtin.func(function(a,b,c,d,e,f,g,h,i){\"undefined\"==typeof a?j.processing.camera():j.processing.camera(a.v,b.v,c.v,d.v,e.v,f.v,g.v,h.v,i.v)}),j.constrain=new Sk.builtin.func(function(a,b,c){return new Sk.builtin.float_(j.processing.constrain(a.v,b.v,c.v))}),j.copy=new Sk.builtin.func(function(a,b,c,d,e,f,g,h,i){other instanceof Sk.builtin.int_||other instanceof Sk.builtin.float_?j.processing.copy(a.v,b.v,c.v,d.v,e.v,f.v,g.v,h.v):j.processing.copy(a.v,b.v,c.v,d.v,e.v,f.v,g.v,h.v,i.v)}),j.createFont=new Sk.builtin.func(function(a,b,c,d){var e=Sk.misceval.callsimArray(j.PFont);return e.v=\"undefined\"==typeof c?j.processing.createFont(a.v,b.v):\"undefined\"==typeof d?j.processing.createFont(a.v,b.v,c.v):j.processing.createFont(a.v,b.v,c.v,d.v),e}),j.createGraphics=new Sk.builtin.func(function(a,b,c,d){var e=Sk.misceval.callsimArray(j.PGraphics);return e.v=\"undefined\"==typeof d?j.processing.createGraphics(a.v,b.v,c.v):j.processing.createGraphics(a.v,b.v,c.v,d.v),e}),j.createImage=new Sk.builtin.func(function(a,b,c){var d=Sk.misceval.callsimArray(j.PImage);return d.v=j.processing.createImage(a.v,b.v,c.v),d}),j.cursor=new Sk.builtin.func(function(a,b,c){\"undefined\"==typeof a?j.processing.cursor():\"undefined\"==typeof b?j.processing.cursor(a.v):\"undefined\"==typeof c?j.processing.cursor(a.v,b.v):j.processing.cursor(a.v,b.v,c.v)}),j.curve=new Sk.builtin.func(function(a,b,c,d,e,f,g,h,i,k,l,m){\"undefined\"==typeof i?j.processing.curve(a.v,b.v,c.v,d.v,e.v,f.v,g.v,h.v):\"undefined\"==typeof k?j.processing.curve(a.v,b.v,c.v,d.v,e.v,f.v,g.v,h.v,i.v):\"undefined\"==typeof 
l?j.processing.curve(a.v,b.v,c.v,d.v,e.v,f.v,g.v,h.v,i.v,k.v):\"undefined\"==typeof m?j.processing.curve(a.v,b.v,c.v,d.v,e.v,f.v,g.v,h.v,i.v,k.v,l.v):j.processing.curve(a.v,b.v,c.v,d.v,e.v,f.v,g.v,h.v,i.v,k.v,l.v,m.v)}),j.curveDetail=new Sk.builtin.func(function(a){j.processing.curveDetail(a.v)}),j.curvePoint=new Sk.builtin.func(function(e,a,b,c,d){j.processing.curvePoint(e.v,a.v,b.v,c.v,d.v)}),j.curveTangent=new Sk.builtin.func(function(e,a,b,c,d){j.processing.curveTangent(e.v,a.v,b.v,c.v,d.v)}),j.curveTightness=new Sk.builtin.func(function(a){j.processing.curveTightness(a.v)}),j.curveVertex=new Sk.builtin.func(function(a,b,c){\"undefined\"==typeof c?j.processing.curveVertex(a.v,b.v):j.processing.curveVertex(a.v,b.v,c.v)}),j.day=new Sk.builtin.func(function(){return new Sk.builtin.int_(j.processing.day())}),j.degrees=new Sk.builtin.func(function(a){return new Sk.builtin.float_(j.processing.degrees(a.v))}),j.directionalLight=new Sk.builtin.func(function(a,b,c,d,e,f){j.processing.directionalLight(a.v,b.v,c.v,d.v,e.v,f.v)}),j.dist=new Sk.builtin.func(function(a,b,c,d,e,f){return\"undefined\"==typeof e?new Sk.builtin.float_(j.processing.dist(a.v,b.v,c.v,d.v)):\"undefined\"==typeof f?new Sk.builtin.float_(j.processing.dist(a.v,b.v,c.v,d.v,e.v)):new Sk.builtin.float_(j.processing.dist(a.v,b.v,c.v,d.v,e.v,f.v))}),j.emissive=new Sk.builtin.func(function(a,b,c){\"undefined\"==typeof b?j.processing.emissive(a.v):\"undefined\"==typeof c?j.processing.emissive(a.v,b.v):j.processing.emissive(a.v,b.v,c.v)}),j.endCamera=new Sk.builtin.func(function(){j.processing.endCamera()}),j.endShape=new Sk.builtin.func(function(a){\"undefined\"==typeof a?j.processing.endShape():j.processing.endShape(a.v)}),j.filter=new Sk.builtin.func(function(a,b){\"undefined\"==typeof b?j.processing.filter(a.v):j.processing.filter(a.v,b.v)}),j.frustum=new Sk.builtin.func(function(a,b,c,d,e,f){j.processing.frustum(a,b,c,d,e,f)}),j.hint=new Sk.builtin.func(function(a){j.processing.hint(a)}),j.hour=new Sk.builtin.func(function(){return new Sk.builtin.int_(j.processing.hour())}),j.hue=new Sk.builtin.func(function(a){return new Sk.builtin.float_(j.processing.hue(a.v))}),j.imageMode=new Sk.builtin.func(function(a){j.processing.imageMode(a.v)}),j.lerp=new Sk.builtin.func(function(a,b,c){return new Sk.builtin.float_(j.processing.lerp(a.v,b.v,c.v))}),j.lerpColor=new Sk.builtin.func(function(a,b,d){var e=Sk.misceval.callsimArray(j.color,[new Sk.builtin.int_(0),new Sk.builtin.int_(0),new Sk.builtin.int_(0)]);return e.v=j.processing.lerpColor(a.v,b.v,d.v),e}),j.lightFalloff=new Sk.builtin.func(function(a,b,c){j.processing.lightFalloff(a.v,b.v,c.v)}),j.lights=new Sk.builtin.func(function(){j.processing.lights()}),j.lightSpecular=new Sk.builtin.func(function(a,b,c){j.processing.lightSpecular(a.v,b.v,c.v)}),j.loadBytes=new Sk.builtin.func(function(a){return new Sk.builtin.list(j.processing.loadBytes(a.v))}),j.loadFont=new Sk.builtin.func(function(a){var b=Sk.misceval.callsimArray(j.PFont);return b.v=j.processing.loadFont(a.v),b}),j.loadShape=new Sk.builtin.func(function(a){var b=Sk.misceval.callsimArray(j.PShapeSVG,[new Sk.builtin.str(\"string\"),a]);return b}),j.loadStrings=new Sk.builtin.func(function(a){return new Sk.builtin.list(j.processing.loadStrings(a.v))}),j.mag=new Sk.builtin.func(function(d,a,b){return\"undefined\"==typeof b?new Sk.builtin.float_(j.processing.mag(d.v,a.v)):new Sk.builtin.float_(j.processing.mag(d.v,a.v,b.v))}),j.map=new Sk.builtin.func(function(a,b,c,d,e){return new 
Sk.builtin.float_(j.processing.map(a.v,b.v,c.v,d.v,e.v))}),j.millis=new Sk.builtin.func(function(){return new Sk.builtin.int_(j.processing.millis())}),j.minute=new Sk.builtin.func(function(){return new Sk.builtin.int_(j.processing.minute())}),j.modelX=new Sk.builtin.func(function(a,b,c){return new Sk.builtin.float_(j.processing.modelX(a.v,b.v,c.v))}),j.modelY=new Sk.builtin.func(function(a,b,c){return new Sk.builtin.float_(j.processing.modelY(a.v,b.v,c.v))}),j.modelZ=new Sk.builtin.func(function(a,b,c){return new Sk.builtin.float_(j.processing.modelZ(a.v,b.v,c.v))}),j.month=new Sk.builtin.func(function(){return new Sk.builtin.int_(j.processing.month())}),j.noCursor=new Sk.builtin.func(function(){j.processing.noCursor()}),j.noise=new Sk.builtin.func(function(a,b,c){return\"undefined\"==typeof b?new Sk.builtin.float_(j.processing.noise(a.v)):\"undefined\"==typeof c?new Sk.builtin.float_(j.processing.noise(a.v,b.v)):new Sk.builtin.float_(j.processing.noise(a.v,b.v,c.v))}),j.noiseDetail=new Sk.builtin.func(function(a,b){j.processing.noiseDetail(a.v,b.v)}),j.noiseSeed=new Sk.builtin.func(function(a){return new Sk.builtin.float_(j.processing.noiseSeed(a.v))}),j.noLights=new Sk.builtin.func(function(){j.processing.noLights()}),j.norm=new Sk.builtin.func(function(a,b,c){return new Sk.builtin.float_(j.processing.norm(a.v,b.v,c.v))}),j.normal=new Sk.builtin.func(function(a,b,c){j.processing.normal(a.v,b.v,c.v)}),j.noTint=new Sk.builtin.func(function(){j.processing.noTint()}),j.ortho=new Sk.builtin.func(function(a,b,c,d,e,f){j.processing.ortho(a.v,b.v,c.v,d.v,e.v,f.v)}),j.perspective=new Sk.builtin.func(function(a,b,c,d){\"undefined\"==typeof a?j.processing.perspective():\"undefined\"==typeof b?j.processing.perspective(a.v):\"undefined\"==typeof c?j.processing.perspective(a.v,b.v):\"undefined\"==typeof d?j.processing.perspective(a.v,b.v,c.v):j.processing.perspective(a.v,b.v,c.v,d.v)}),j.pointLight=new Sk.builtin.func(function(a,b,c,d,e,f){j.processing.pointLight(a.v,b.v,c.v,d.v,e.v,f.v)}),j.printCamera=new Sk.builtin.func(function(){j.processing.printCamera()}),j.println=new Sk.builtin.func(function(a){j.processing.println(a.v)}),j.printProjection=new Sk.builtin.func(function(){j.processing.printProjection()}),j.radians=new Sk.builtin.func(function(a){return new Sk.builtin.float_(j.processing.radians(a.v))}),j.randomSeed=new Sk.builtin.func(function(a){return new Sk.builtin.float_(j.processing.randomSeed(a.v))}),j.random=new Sk.builtin.func(function(a,b){return\"undefined\"==typeof a?new Sk.builtin.float_(j.processing.random()):\"undefined\"==typeof b?new Sk.builtin.float_(j.processing.random(a.v)):new Sk.builtin.float_(j.processing.random(a.v,b.v))}),j.requestImage=new Sk.builtin.func(function(a,b){var c=Sk.misceval.callsimArray(j.PImage);return c.v=\"undefined\"==typeof b?j.processing.requestImage(a.v):j.processing.requestImage(a.v,b.v),c}),j.saturation=new Sk.builtin.func(function(a){return new Sk.builtin.float_(j.processing.saturation(a.v))}),j.save=new Sk.builtin.func(function(a){j.processing.save(a.v)}),j.saveFrame=new Sk.builtin.func(function(a){\"undefined\"==typeof a?j.processing.saveFrame():j.processing.saveFrame(a.v)}),j.saveStrings=new Sk.builtin.func(function(a,b){j.processing.saveStrings(a.v,b.v)}),j.screenX=new Sk.builtin.func(function(a,b,c){return new Sk.builtin.float_(j.processing.screenX(a.v,b.v,c.v))}),j.screenY=new Sk.builtin.func(function(a,b,c){return new Sk.builtin.float_(j.processing.screenY(a.v,b.v,c.v))}),j.screenZ=new Sk.builtin.func(function(a,b,c){return new 
Sk.builtin.float_(j.processing.screenZ(a.v,b.v,c.v))}),j.second=new Sk.builtin.func(function(){return new Sk.builtin.int_(j.processing.second())}),j.shape=new Sk.builtin.func(function(a,b,c,d,e){\"undefined\"==typeof b?j.processing.shape(a.v):\"undefined\"==typeof c?j.processing.shape(a.v,b.v):\"undefined\"==typeof d?j.processing.shape(a.v,b.v,c.v):\"undefined\"==typeof e?j.processing.shape(a.v,b.v,c.v,d.v):j.processing.shape(a.v,b.v,c.v,d.v,e.v)}),j.shapeMode=new Sk.builtin.func(function(a){j.processing.shapeMode(a.v)}),j.shininess=new Sk.builtin.func(function(a){j.processing.shininess(a.v)}),j.specular=new Sk.builtin.func(function(a,b,c){\"undefined\"==typeof b?j.processing.specular(a.v):\"undefined\"==typeof c?j.processing.specular(a.v,b.v):j.processing.specular(a.v,b.v,c.v)}),j.spotLight=new Sk.builtin.func(function(a,b,c,d,e,f,g,h){j.processing.spotLight(a.v,b.v,c.v,d.v,e.v,f.v,g.v,h.v)}),j.sq=new Sk.builtin.func(function(a){return new Sk.builtin.float_(j.processing.sq(a))}),j.status=new Sk.builtin.func(function(a){j.processing.status(a.v)}),j.textAlign=new Sk.builtin.func(function(a,b){\"undefined\"==typeof b?j.processing.textAlign(a.v):j.processing.textAlign(a.v,b.v)}),j.textAscent=new Sk.builtin.func(function(){return new Sk.builtin.float_(j.processing.textAscent())}),j.textDescent=new Sk.builtin.func(function(){return new Sk.builtin.float_(j.processing.textDescent())}),j.textFont=new Sk.builtin.func(function(a,b){\"undefined\"==typeof b?j.processing.textFont(a.v):j.processing.textFont(a.v,b.v)}),j.textLeading=new Sk.builtin.func(function(a){j.processing.textLeading(a.v)}),j.textMode=new Sk.builtin.func(function(a){j.processing.textMode(a.v)}),j.textSize=new Sk.builtin.func(function(a){j.processing.textSize(a.v)}),j.texture=new Sk.builtin.func(function(a){j.processing.texture(a.v)}),j.textureMode=new Sk.builtin.func(function(a){j.processing.textureMode(a.v)}),j.textWidth=new Sk.builtin.func(function(a){return new Sk.builtin.float_(j.processing.textWidth(a.v))}),j.tint=new Sk.builtin.func(function(a,b,c,d){\"undefined\"==typeof b?j.processing.tint(a.v):\"undefined\"==typeof c?j.processing.tint(a.v,b.v):\"undefined\"==typeof d?j.processing.tint(a.v,b.v,c.v):j.processing.tint(a.v,b.v,c.v,d.v)}),j.updatePixels=new Sk.builtin.func(function(){j.processing.updatePixels()}),j.vertex=new Sk.builtin.func(function(a,b,c,d,e){\"undefined\"==typeof c?j.processing.vertex(a.v,b.v):\"undefined\"==typeof d?j.processing.vertex(a.v,b.v,c.v):\"undefined\"==typeof e?j.processing.vertex(a.v,b.v,c.v,d.v):j.processing.vertex(a.v,b.v,c.v,d.v,e.v)}),j.year=new Sk.builtin.func(function(){return new Sk.builtin.int_(j.processing.year())}),j.box=new Sk.builtin.func(function(a){j.processing.box(a.v)}),j.sphere=new Sk.builtin.func(function(a){j.processing.sphere(a.v)}),j.sphereDetail=new Sk.builtin.func(function(a,b){\"undefined\"==typeof b?j.processing.sphereDetail(a.v):j.processing.sphereDetail(a.v,b.v)}),j.background=new Sk.builtin.func(function(a,c,d){\"undefined\"!=typeof c&&(c=c.v),\"undefined\"!=typeof d&&(d=d.v),j.processing.background(a.v,c,d)}),j.fill=new Sk.builtin.func(function(a,c,d,e){\"undefined\"!=typeof c&&(c=c.v),\"undefined\"!=typeof d&&(d=d.v),\"undefined\"!=typeof e&&(e=e.v),j.processing.fill(a.v,c,d,e)}),j.stroke=new Sk.builtin.func(function(a,c,d,e){\"undefined\"!=typeof c&&(c=c.v),\"undefined\"!=typeof d&&(d=d.v),\"undefined\"!=typeof e&&(e=e.v),j.processing.stroke(a.v,c,d,e)}),j.noStroke=new Sk.builtin.func(function(){j.processing.noStroke()}),j.colorMode=new 
Sk.builtin.func(function(a,b,c,d,e){b=\"undefined\"==typeof b?255:b.v,\"undefined\"!=typeof c&&(c=c.v),\"undefined\"!=typeof d&&(d=d.v),\"undefined\"!=typeof e&&(e=e.v),j.processing.colorMode(a.v,b,c,d,e)}),j.noFill=new Sk.builtin.func(function(){j.processing.noFill()}),j.loop=new Sk.builtin.func(function(){if(null===j.processing)throw new Sk.builtin.Exception(\"loop() should be called after run()\");l=!0,j.processing.loop()}),j.noLoop=new Sk.builtin.func(function(){if(null===j.processing)throw new Sk.builtin.Exception(\"noLoop() should be called after run()\");l=!1,j.processing.noLoop()}),j.frameRate=new Sk.builtin.func(function(a){j.processing.frameRate(a.v)}),j.width=new Sk.builtin.int_(0),j.height=new Sk.builtin.int_(0),j.renderMode=j.P2D,j.size=new Sk.builtin.func(function(a,b,c){\"undefined\"==typeof c&&(c=j.P2D),j.processing.size(a.v,b.v,c.v),j.width=new Sk.builtin.int_(j.processing.width),j.height=new Sk.builtin.int_(j.processing.height),j.renderMode=c}),j.exitp=new Sk.builtin.func(function(){j.processing.exit()}),j.mouseX=new Sk.builtin.func(function(){return new Sk.builtin.int_(j.processing.mouseX)}),j.mouseY=new Sk.builtin.func(function(){return new Sk.builtin.int_(j.processing.mouseY)}),j.pmouseX=new Sk.builtin.func(function(){return new Sk.builtin.int_(j.processing.pmouseX)}),j.pmouseY=new Sk.builtin.func(function(){return new Sk.builtin.int_(j.processing.pmouseY)}),j.rectMode=new Sk.builtin.func(function(a){j.processing.rectMode(a.v)}),j.strokeWeight=new Sk.builtin.func(function(a){j.processing.strokeWeight(a.v)}),j.smooth=new Sk.builtin.func(function(){j.processing.smooth()}),j.noSmooth=new Sk.builtin.func(function(){j.processing.noSmooth()}),j.ellipseMode=new Sk.builtin.func(function(a){j.processing.ellipseMode(a.v)}),j.strokeCap=new Sk.builtin.func(function(a){j.processing.strokeCap(a.v)}),j.strokeJoin=new Sk.builtin.func(function(a){j.processing.strokeJoin(a.v)}),j.rotate=new Sk.builtin.func(function(a){j.processing.rotate(a.v)}),j.rotateX=new Sk.builtin.func(function(a){j.processing.rotateX(a.v)}),j.rotateY=new Sk.builtin.func(function(a){j.processing.rotateY(a.v)}),j.rotateZ=new Sk.builtin.func(function(a){j.processing.rotateZ(a.v)}),j.scale=new Sk.builtin.func(function(a,b,c){b=\"undefined\"==typeof b?1:b.v,c=\"undefined\"==typeof c?1:c.v,j.processing.scale(a.v,b,c)}),j.translate=new Sk.builtin.func(function(a,b,c){b=\"undefined\"==typeof b?1:b.v,c=\"undefined\"==typeof c?1:c.v,j.processing.translate(a.v,b,c)}),j.popMatrix=new Sk.builtin.func(function(){j.processing.popMatrix()}),j.pushMatrix=new Sk.builtin.func(function(){j.processing.pushMatrix()}),j.applyMatrix=new Sk.builtin.func(function(){var a,b=Array.prototype.slice.call(arguments,0,16);for(a=0;a 0):\n self.percDown(i)\n i = i - 1\n \n def percDown(self,i):\n while (i * 2) <= self.currentSize:\n mc = self.minChild(i)\n if self.heapArray[i][0] > self.heapArray[mc][0]:\n tmp = self.heapArray[i]\n self.heapArray[i] = self.heapArray[mc]\n self.heapArray[mc] = tmp\n i = mc\n \n def minChild(self,i):\n if i*2 > self.currentSize:\n return -1\n else:\n if i*2 + 1 > self.currentSize:\n return i*2\n else:\n if self.heapArray[i*2][0] < self.heapArray[i*2+1][0]:\n return i*2\n else:\n return i*2+1\n\n def percUp(self,i):\n while i // 2 > 0:\n if self.heapArray[i][0] < self.heapArray[i//2][0]:\n tmp = self.heapArray[i//2]\n self.heapArray[i//2] = self.heapArray[i]\n self.heapArray[i] = tmp\n i = i//2\n \n def add(self,k):\n self.heapArray.append(k)\n self.currentSize = self.currentSize + 1\n 
self.percUp(self.currentSize)\n\n def delMin(self):\n retval = self.heapArray[1][1]\n self.heapArray[1] = self.heapArray[self.currentSize]\n self.currentSize = self.currentSize - 1\n self.heapArray.pop()\n self.percDown(1)\n return retval\n \n def isEmpty(self):\n if self.currentSize == 0:\n return True\n else:\n return False\n\n def decreaseKey(self,val,amt):\n # this is a little wierd, but we need to find the heap thing to decrease by\n # looking at its value\n done = False\n i = 1\n myKey = 0\n while not done and i <= self.currentSize:\n if self.heapArray[i][1] == val:\n done = True\n myKey = i\n else:\n i = i + 1\n if myKey > 0:\n self.heapArray[myKey] = (amt,self.heapArray[myKey][1])\n self.percUp(myKey)\n \n def __contains__(self,vtx):\n for pair in self.heapArray:\n if pair[1] == vtx:\n return True\n return False\n \nclass TestBinHeap(unittest.TestCase):\n def setUp(self):\n self.theHeap = PriorityQueue()\n self.theHeap.add((2,'x'))\n self.theHeap.add((3,'y'))\n self.theHeap.add((5,'z'))\n self.theHeap.add((6,'a'))\n self.theHeap.add((4,'d'))\n\n\n def testInsert(self):\n assert self.theHeap.currentSize == 5\n\n def testDelmin(self):\n assert self.theHeap.delMin() == 'x'\n assert self.theHeap.delMin() == 'y'\n \n def testDecKey(self):\n self.theHeap.decreaseKey('d',1)\n assert self.theHeap.delMin() == 'd'\n \nif __name__ == '__main__':\n unittest.main()\n","src/lib/pythonds/graphs/__init__.py":"\n\nfrom .adjGraph import Graph\nfrom .adjGraph import Vertex\nfrom .priorityQueue import PriorityQueue\n","src/lib/pythonds/trees/balance.py":"#!/bin/env python3.1\n# Bradley N. Miller, David L. Ranum\n# Introduction to Data Structures and Algorithms in Python\n# Copyright 2005, 2010\n# \n\nfrom .bst import BinarySearchTree, TreeNode\n\nclass AVLTree(BinarySearchTree):\n '''\n Author: Brad Miller\n Date: 1/15/2005\n Description: Imlement a binary search tree with the following interface\n functions: \n __contains__(y) <==> y in x\n __getitem__(y) <==> x[y]\n __init__()\n __len__() <==> len(x)\n __setitem__(k,v) <==> x[k] = v\n clear()\n get(k)\n has_key(k)\n items() \n keys() \n values()\n put(k,v)\n '''\n\n\n def _put(self,key,val,currentNode):\n if key < currentNode.key:\n if currentNode.hasLeftChild():\n self._put(key,val,currentNode.leftChild)\n else:\n currentNode.leftChild = TreeNode(key,val,parent=currentNode)\n self.updateBalance(currentNode.leftChild)\n else:\n if currentNode.hasRightChild():\n self._put(key,val,currentNode.rightChild)\n else:\n currentNode.rightChild = TreeNode(key,val,parent=currentNode)\n self.updateBalance(currentNode.rightChild) \n\n def updateBalance(self,node):\n if node.balanceFactor > 1 or node.balanceFactor < -1:\n self.rebalance(node)\n return\n if node.parent != None:\n if node.isLeftChild():\n node.parent.balanceFactor += 1\n elif node.isRightChild():\n node.parent.balanceFactor -= 1\n\n if node.parent.balanceFactor != 0:\n self.updateBalance(node.parent)\n\n def rebalance(self,node):\n if node.balanceFactor < 0:\n if node.rightChild.balanceFactor > 0:\n # Do an LR Rotation\n self.rotateRight(node.rightChild)\n self.rotateLeft(node)\n else:\n # single left\n self.rotateLeft(node)\n elif node.balanceFactor > 0:\n if node.leftChild.balanceFactor < 0:\n # Do an RL Rotation\n self.rotateLeft(node.leftChild)\n self.rotateRight(node)\n else:\n # single right\n self.rotateRight(node)\n\n def rotateLeft(self,rotRoot):\n newRoot = rotRoot.rightChild\n rotRoot.rightChild = newRoot.leftChild\n if newRoot.leftChild != None:\n newRoot.leftChild.parent = rotRoot\n 
newRoot.parent = rotRoot.parent\n if rotRoot.isRoot():\n self.root = newRoot\n else:\n if rotRoot.isLeftChild():\n rotRoot.parent.leftChild = newRoot\n else:\n rotRoot.parent.rightChild = newRoot\n newRoot.leftChild = rotRoot\n rotRoot.parent = newRoot\n rotRoot.balanceFactor = rotRoot.balanceFactor + 1 - min(newRoot.balanceFactor, 0)\n newRoot.balanceFactor = newRoot.balanceFactor + 1 + max(rotRoot.balanceFactor, 0)\n\n\n def rotateRight(self,rotRoot):\n newRoot = rotRoot.leftChild\n rotRoot.leftChild = newRoot.rightChild\n if newRoot.rightChild != None:\n newRoot.rightChild.parent = rotRoot\n newRoot.parent = rotRoot.parent\n if rotRoot.isRoot():\n self.root = newRoot\n else:\n if rotRoot.isRightChild():\n rotRoot.parent.rightChild = newRoot\n else:\n rotRoot.parent.leftChild = newRoot\n newRoot.rightChild = rotRoot\n rotRoot.parent = newRoot\n rotRoot.balanceFactor = rotRoot.balanceFactor - 1 - max(newRoot.balanceFactor, 0)\n newRoot.balanceFactor = newRoot.balanceFactor - 1 + min(rotRoot.balanceFactor, 0)\n \n","src/lib/pythonds/trees/binaryTree.py":"# Bradley N. Miller, David L. Ranum\n# Introduction to Data Structures and Algorithms in Python\n# Copyright 2005\n# \n\nclass BinaryTree:\n \"\"\"\n A recursive implementation of Binary Tree\n Using links and Nodes approach.\n \"\"\" \n def __init__(self,rootObj):\n self.key = rootObj\n self.leftChild = None\n self.rightChild = None\n\n def insertLeft(self,newNode):\n if self.leftChild == None:\n self.leftChild = BinaryTree(newNode)\n else:\n t = BinaryTree(newNode)\n t.left = self.leftChild\n self.leftChild = t\n \n def insertRight(self,newNode):\n if self.rightChild == None:\n self.rightChild = BinaryTree(newNode)\n else:\n t = BinaryTree(newNode)\n t.right = self.rightChild\n self.rightChild = t\n\n def isLeaf(self):\n return ((not self.leftChild) and (not self.rightChild))\n\n def getRightChild(self):\n return self.rightChild\n\n def getLeftChild(self):\n return self.leftChild\n\n def setRootVal(self,obj):\n self.key = obj\n\n def getRootVal(self,):\n return self.key\n\n def inorder(self):\n if self.leftChild:\n self.leftChild.inorder()\n print(self.key)\n if self.rightChild:\n self.rightChild.inorder()\n\n def postorder(self):\n if self.leftChild:\n self.leftChild.postorder()\n if self.rightChild:\n self.rightChild.postorder()\n print(self.key)\n\n\n def preorder(self):\n print(self.key)\n if self.leftChild:\n self.leftChild.preorder()\n if self.rightChild:\n self.rightChild.preorder()\n\n def printexp(self):\n if self.leftChild:\n print('(')\n self.leftChild.printexp()\n print(self.key)\n if self.rightChild:\n self.rightChild.printexp()\n print(')')\n\n def postordereval(self):\n opers = {'+':operator.add, '-':operator.sub, '*':operator.mul, '/':operator.truediv}\n res1 = None\n res2 = None\n if self.leftChild:\n res1 = self.leftChild.postordereval() #// \\label{peleft}\n if self.rightChild:\n res2 = self.rightChild.postordereval() #// \\label{peright}\n if res1 and res2:\n return opers[self.key](res1,res2) #// \\label{peeval}\n else:\n return self.key\n\ndef inorder(tree):\n if tree != None:\n inorder(tree.getLeftChild())\n print(tree.getRootVal())\n inorder(tree.getRightChild())\n\ndef printexp(tree):\n if tree.leftChild:\n print('(')\n printexp(tree.getLeftChild())\n print(tree.getRootVal())\n if tree.rightChild:\n printexp(tree.getRightChild())\n print(')') \n\ndef printexp(tree):\n sVal = \"\"\n if tree:\n sVal = '(' + printexp(tree.getLeftChild())\n sVal = sVal + str(tree.getRootVal())\n sVal = sVal + 
printexp(tree.getRightChild()) + ')'\n return sVal\n\ndef postordereval(tree):\n opers = {'+':operator.add, '-':operator.sub, '*':operator.mul, '/':operator.truediv}\n res1 = None\n res2 = None\n if tree:\n res1 = postordereval(tree.getLeftChild()) #// \\label{peleft}\n res2 = postordereval(tree.getRightChild()) #// \\label{peright}\n if res1 and res2:\n return opers[tree.getRootVal()](res1,res2) #// \\label{peeval}\n else:\n return tree.getRootVal()\n\ndef height(tree):\n if tree == None:\n return -1\n else:\n return 1 + max(height(tree.leftChild),height(tree.rightChild))\n\n# t = BinaryTree(7)\n# t.insertLeft(3)\n# t.insertRight(9)\n# inorder(t)\n# import operator\n# x = BinaryTree('*')\n# x.insertLeft('+')\n# l = x.getLeftChild()\n# l.insertLeft(4)\n# l.insertRight(5)\n# x.insertRight(7)\n# print(printexp(x))\n# print(postordereval(x))\n# print(height(x))\n","src/lib/pythonds/trees/binheap.py":"# Bradley N. Miller, David L. Ranum\n# Introduction to Data Structures and Algorithms in Python\n# Copyright 2005\n# \n\n# this heap takes key value pairs, we will assume that the keys are integers\nclass BinHeap:\n def __init__(self):\n self.heapList = [0]\n self.currentSize = 0\n\n\n def buildHeap(self,alist):\n i = len(alist) // 2\n self.currentSize = len(alist)\n self.heapList = [0] + alist[:]\n print(len(self.heapList), i)\n while (i > 0):\n print(self.heapList, i)\n self.percDown(i)\n i = i - 1\n print(self.heapList,i)\n \n def percDown(self,i):\n while (i * 2) <= self.currentSize:\n mc = self.minChild(i)\n if self.heapList[i] > self.heapList[mc]:\n tmp = self.heapList[i]\n self.heapList[i] = self.heapList[mc]\n self.heapList[mc] = tmp\n i = mc\n \n def minChild(self,i):\n if i * 2 + 1 > self.currentSize:\n return i * 2\n else:\n if self.heapList[i * 2] < self.heapList[i * 2 + 1]:\n return i * 2\n else:\n return i * 2 + 1\n\n def percUp(self,i):\n while i // 2 > 0:\n if self.heapList[i] < self.heapList[i//2]:\n tmp = self.heapList[i // 2]\n self.heapList[i // 2] = self.heapList[i]\n self.heapList[i] = tmp\n i = i // 2\n \n def insert(self,k):\n self.heapList.append(k)\n self.currentSize = self.currentSize + 1\n self.percUp(self.currentSize)\n\n def delMin(self):\n retval = self.heapList[1]\n self.heapList[1] = self.heapList[self.currentSize]\n self.currentSize = self.currentSize - 1\n self.heapList.pop()\n self.percDown(1)\n return retval\n \n def isEmpty(self):\n if currentSize == 0:\n return True\n else:\n return False\n","src/lib/pythonds/trees/bst.py":"#!/bin/env python3.1\n# Bradley N. Miller, David L. 
Ranum\n# Introduction to Data Structures and Algorithms in Python\n# Copyright 2005, 2010\n# \n\nclass BinarySearchTree:\n '''\n Author: Brad Miller\n Date: 1/15/2005\n Description: Imlement a binary search tree with the following interface\n functions: \n __contains__(y) <==> y in x\n __getitem__(y) <==> x[y]\n __init__()\n __len__() <==> len(x)\n __setitem__(k,v) <==> x[k] = v\n clear()\n get(k)\n items() \n keys() \n values()\n put(k,v)\n in\n del <==> \n '''\n\n def __init__(self):\n self.root = None\n self.size = 0\n \n def put(self,key,val):\n if self.root:\n self._put(key,val,self.root)\n else:\n self.root = TreeNode(key,val)\n self.size = self.size + 1\n\n def _put(self,key,val,currentNode):\n if key < currentNode.key:\n if currentNode.hasLeftChild():\n self._put(key,val,currentNode.leftChild)\n else:\n currentNode.leftChild = TreeNode(key,val,parent=currentNode)\n else:\n if currentNode.hasRightChild():\n self._put(key,val,currentNode.rightChild)\n else:\n currentNode.rightChild = TreeNode(key,val,parent=currentNode)\n \n def __setitem__(self,k,v):\n self.put(k,v)\n\n def get(self,key):\n if self.root:\n res = self._get(key,self.root)\n if res:\n return res.payload\n else:\n return None\n else:\n return None\n \n def _get(self,key,currentNode):\n if not currentNode:\n return None\n elif currentNode.key == key:\n return currentNode\n elif key < currentNode.key:\n return self._get(key,currentNode.leftChild)\n else:\n return self._get(key,currentNode.rightChild)\n \n \n def __getitem__(self,key):\n res = self.get(key)\n if res:\n return res\n else:\n raise KeyError('Error, key not in tree')\n \n\n def __contains__(self,key):\n if self._get(key,self.root):\n return True\n else:\n return False\n \n def length(self):\n return self.size\n\n def __len__(self):\n return self.size\n\n def __iter__(self):\n return self.root.__iter__()\n \n def delete(self,key):\n if self.size > 1:\n nodeToRemove = self._get(key,self.root)\n if nodeToRemove:\n self.remove(nodeToRemove)\n self.size = self.size-1\n else:\n raise KeyError('Error, key not in tree')\n elif self.size == 1 and self.root.key == key:\n self.root = None\n self.size = self.size - 1\n else:\n raise KeyError('Error, key not in tree')\n\n def __delitem__(self,key):\n self.delete(key)\n \n def remove(self,currentNode):\n if currentNode.isLeaf(): #leaf\n if currentNode == currentNode.parent.leftChild:\n currentNode.parent.leftChild = None\n else:\n currentNode.parent.rightChild = None\n elif currentNode.hasBothChildren(): #interior\n succ = currentNode.findSuccessor()\n succ.spliceOut()\n currentNode.key = succ.key\n currentNode.payload = succ.payload\n else: # this node has one child\n if currentNode.hasLeftChild():\n if currentNode.isLeftChild():\n currentNode.leftChild.parent = currentNode.parent\n currentNode.parent.leftChild = currentNode.leftChild\n elif currentNode.isRightChild():\n currentNode.leftChild.parent = currentNode.parent\n currentNode.parent.rightChild = currentNode.leftChild\n else:\n currentNode.replaceNodeData(currentNode.leftChild.key,\n currentNode.leftChild.payload,\n currentNode.leftChild.leftChild,\n currentNode.leftChild.rightChild)\n else:\n if currentNode.isLeftChild():\n currentNode.rightChild.parent = currentNode.parent\n currentNode.parent.leftChild = currentNode.rightChild\n elif currentNode.isRightChild():\n currentNode.rightChild.parent = currentNode.parent\n currentNode.parent.rightChild = currentNode.rightChild\n else:\n currentNode.replaceNodeData(currentNode.rightChild.key,\n 
currentNode.rightChild.payload,\n currentNode.rightChild.leftChild,\n currentNode.rightChild.rightChild)\n\n def inorder(self):\n self._inorder(self.root)\n\n def _inorder(self,tree):\n if tree != None:\n self._inorder(tree.leftChild)\n print(tree.key)\n self._inorder(tree.rightChild)\n\n def postorder(self):\n self._postorder(self.root)\n\n def _postorder(self, tree):\n if tree:\n self._postorder(tree.rightChild)\n self._postorder(tree.leftChild)\n print(tree.key) \n\n def preorder(self):\n self._preorder(self,self.root)\n\n def _preorder(self,tree):\n if tree:\n print(tree.key) \n self._preorder(tree.leftChild)\n self._preorder(tree.rightChild)\n\n \nclass TreeNode:\n def __init__(self,key,val,left=None,right=None,parent=None):\n self.key = key\n self.payload = val\n self.leftChild = left\n self.rightChild = right\n self.parent = parent\n self.balanceFactor = 0\n \n def hasLeftChild(self):\n return self.leftChild\n\n def hasRightChild(self):\n return self.rightChild\n \n def isLeftChild(self):\n return self.parent and self.parent.leftChild == self\n\n def isRightChild(self):\n return self.parent and self.parent.rightChild == self\n\n def isRoot(self):\n return not self.parent\n\n def isLeaf(self):\n return not (self.rightChild or self.leftChild)\n\n def hasAnyChildren(self):\n return self.rightChild or self.leftChild\n\n def hasBothChildren(self):\n return self.rightChild and self.leftChild\n \n def replaceNodeData(self,key,value,lc,rc):\n self.key = key\n self.payload = value\n self.leftChild = lc\n self.rightChild = rc\n if self.hasLeftChild():\n self.leftChild.parent = self\n if self.hasRightChild():\n self.rightChild.parent = self\n \n def findSuccessor(self):\n succ = None\n if self.hasRightChild():\n succ = self.rightChild.findMin()\n else:\n if self.parent:\n if self.isLeftChild():\n succ = self.parent\n else:\n self.parent.rightChild = None\n succ = self.parent.findSuccessor()\n self.parent.rightChild = self\n return succ\n\n\n def spliceOut(self):\n if self.isLeaf():\n if self.isLeftChild():\n self.parent.leftChild = None\n else:\n self.parent.rightChild = None\n elif self.hasAnyChildren():\n if self.hasLeftChild():\n if self.isLeftChild():\n self.parent.leftChild = self.leftChild\n else:\n self.parent.rightChild = self.leftChild\n self.leftChild.parent = self.parent\n else:\n if self.isLeftChild():\n self.parent.leftChild = self.rightChild\n else:\n self.parent.rightChild = self.rightChild\n self.rightChild.parent = self.parent\n\n def findMin(self):\n current = self\n while current.hasLeftChild():\n current = current.leftChild\n return current\n\n def __iter__(self):\n \"\"\"The standard inorder traversal of a binary tree.\"\"\"\n if self:\n if self.hasLeftChild():\n for elem in self.leftChild:\n yield elem\n yield self.key\n if self.hasRightChild():\n for elem in self.rightChild:\n yield elem\n\n \n","src/lib/pythonds/trees/__init__.py":"\n# from .binaryTree import BinaryTree\n# from .balance import AVLTree\n# from .bst import BinarySearchTree\n# from .binheap import BinHeap\n\n\n","src/lib/pythonds/__init__.py":"","src/lib/py_compile.py":"raise NotImplementedError(\"py_compile is not yet implemented in Skulpt\")\n","src/lib/Queue.py":"raise NotImplementedError(\"Queue is not yet implemented in Skulpt\")\n","src/lib/quopri.py":"raise NotImplementedError(\"quopri is not yet implemented in Skulpt\")\n","src/lib/random.js":"var MersenneTwister=function(a){a==null&&(a=new 
Date().getTime()),this.N=624,this.M=397,this.MATRIX_A=2567483615,this.UPPER_MASK=2147483648,this.LOWER_MASK=2147483647,this.mt=Array(this.N),this.mti=this.N+1,this.init_genrand(a)};MersenneTwister.prototype.init_genrand=function(a){for(this.mt[0]=a>>>0,this.mti=1;this.mti>>30;this.mt[this.mti]=(1812433253*((4294901760&a)>>>16)<<16)+1812433253*(65535&a)+this.mti,this.mt[this.mti]>>>=0}},MersenneTwister.prototype.init_by_array=function(a,b){var d,e,f;for(this.init_genrand(19650218),d=1,e=0,f=this.N>b?this.N:b;f;f--){var g=this.mt[d-1]^this.mt[d-1]>>>30;this.mt[d]=(this.mt[d]^(1664525*((4294901760&g)>>>16)<<16)+1664525*(65535&g))+a[e]+e,this.mt[d]>>>=0,d++,e++,d>=this.N&&(this.mt[0]=this.mt[this.N-1],d=1),e>=b&&(e=0)}for(f=this.N-1;f;f--){var g=this.mt[d-1]^this.mt[d-1]>>>30;this.mt[d]=(this.mt[d]^(1566083941*((4294901760&g)>>>16)<<16)+1566083941*(65535&g))-d,this.mt[d]>>>=0,d++,d>=this.N&&(this.mt[0]=this.mt[this.N-1],d=1)}this.mt[0]=2147483648},MersenneTwister.prototype.genrand_int32=function(){var a,b=[0,this.MATRIX_A];if(this.mti>=this.N){var d;for(this.mti==this.N+1&&this.init_genrand(5489),d=0;d>>1^b[1&a];for(;d>>1^b[1&a];a=this.mt[this.N-1]&this.UPPER_MASK|this.mt[0]&this.LOWER_MASK,this.mt[this.N-1]=this.mt[this.M-1]^a>>>1^b[1&a],this.mti=0}return a=this.mt[this.mti++],a^=a>>>11,a^=2636928640&a<<7,a^=4022730752&a<<15,a^=a>>>18,a>>>0},MersenneTwister.prototype.genrand_int31=function(){return this.genrand_int32()>>>1},MersenneTwister.prototype.genrand_real1=function(){return this.genrand_int32()*(1/4294967295)},MersenneTwister.prototype.random=function(){return this.genrand_int32()*(1/4294967296)},MersenneTwister.prototype.genrand_real3=function(){return(this.genrand_int32()+.5)*(1/4294967296)},MersenneTwister.prototype.genrand_res53=function(){var d=this.genrand_int32()>>>5,a=this.genrand_int32()>>>6;return(67108864*d+a)*(1/9007199254740992)};var $builtinmodule=function(){var a=Math.log,b=Math.sqrt,d={},e=new MersenneTwister,f=void 0;d.seed=new Sk.builtin.func(function(a){return Sk.builtin.pyCheckArgsLen(\"seed\",arguments.length,0,1),a=Sk.builtin.asnum$(a),e=0d)h=g((f+d+1)/d);else throw new Sk.builtin.ValueError(\"zero step for randrange()\");if(0>=h)throw new Sk.builtin.ValueError(\"empty range for randrange()\");return i=a+d*g(e.genrand_res53()*h),new Sk.builtin.int_(i)};d.randint=new Sk.builtin.func(function(d,e){return Sk.builtin.pyCheckArgsLen(\"randint\",arguments.length,2,2),d=Sk.builtin.asnum$(d),e=Sk.builtin.asnum$(e),h(d,e+1)}),d.randrange=new Sk.builtin.func(function(a,b,d){return Sk.builtin.pyCheckArgsLen(\"randrange\",arguments.length,1,3),a=Sk.builtin.asnum$(a),b=Sk.builtin.asnum$(b),d=Sk.builtin.asnum$(d),h(a,b,d)}),d.uniform=new Sk.builtin.func(function(d,f){Sk.builtin.pyCheckArgsLen(\"uniform\",arguments.length,2,2),d=Sk.builtin.asnum$(d),f=Sk.builtin.asnum$(f);var g=e.genrand_res53();return c=d+g*(f-d),new Sk.builtin.float_(c)}),d.triangular=new Sk.builtin.func(function(a,d,f){Sk.builtin.pyCheckArgsLen(\"triangular\",arguments.length,2,3),Sk.builtin.pyCheckType(\"low\",\"number\",Sk.builtin.checkNumber(a)),Sk.builtin.pyCheckType(\"high\",\"number\",Sk.builtin.checkNumber(d));var g,h,i;return a=Sk.builtin.asnum$(a),d=Sk.builtin.asnum$(d),a>d&&(i=a,a=d,d=i),void 0===f||f instanceof Sk.builtin.none?f=(d-a)/2:(Sk.builtin.pyCheckType(\"mode\",\"number\",Sk.builtin.checkNumber(f)),f=Sk.builtin.asnum$(f)),g=e.genrand_res53(),h=g<(f-a)/(d-a)?a+b(g*(d-a)*(f-a)):d-b((1-g)*(d-a)*(d-f)),new Sk.builtin.float_(h)});var i=function(d,g){var 
k,l,m,n,o,h=Math.sin,i=Math.cos,j=Math.PI;return void 0===f?(k=e.genrand_res53(),l=e.genrand_res53(),m=b(-2*a(k)),n=2*j*l,o=m*i(n),f=m*h(n)):(o=f,f=void 0),d+g*o};return d.gauss=new Sk.builtin.func(function(a,b){return Sk.builtin.pyCheckArgsLen(\"gauss\",arguments.length,2,2),Sk.builtin.pyCheckType(\"mu\",\"number\",Sk.builtin.checkNumber(a)),Sk.builtin.pyCheckType(\"sigma\",\"number\",Sk.builtin.checkNumber(b)),a=Sk.builtin.asnum$(a),b=Sk.builtin.asnum$(b),new Sk.builtin.float_(i(a,b))}),d.normalvariate=d.gauss,d.lognormvariate=new Sk.builtin.func(function(a,b){var d=Math.exp;return Sk.builtin.pyCheckArgsLen(\"lognormvariate\",arguments.length,2,2),Sk.builtin.pyCheckType(\"mu\",\"number\",Sk.builtin.checkNumber(a)),Sk.builtin.pyCheckType(\"sigma\",\"number\",Sk.builtin.checkNumber(b)),a=Sk.builtin.asnum$(a),b=Sk.builtin.asnum$(b),new Sk.builtin.float_(d(i(a,b)))}),d.expovariate=new Sk.builtin.func(function(b){Sk.builtin.pyCheckArgsLen(\"expovariate\",arguments.length,1,1),Sk.builtin.pyCheckType(\"lambd\",\"number\",Sk.builtin.checkNumber(b)),b=Sk.builtin.asnum$(b);var d=e.genrand_res53();return new Sk.builtin.float_(-a(d)/b)}),d.choice=new Sk.builtin.func(function(a){if(Sk.builtin.pyCheckArgsLen(\"choice\",arguments.length,1,1),Sk.builtin.pyCheckType(\"seq\",\"sequence\",Sk.builtin.checkSequence(a)),void 0!==a.sq$length){var b=g(e.genrand_res53()*a.sq$length());return a.mp$subscript(b)}throw new Sk.builtin.TypeError(\"object has no length\")}),d.shuffle=new Sk.builtin.func(function(a){if(Sk.builtin.pyCheckArgsLen(\"shuffle\",arguments.length,1,1),Sk.builtin.pyCheckType(\"x\",\"sequence\",Sk.builtin.checkSequence(a)),void 0===a.sq$length)throw new Sk.builtin.TypeError(\"object has no length\");else if(void 0!==a.mp$ass_subscript)for(var b=a.sq$length()-1;0=c)););return j.push(new Sk.builtin.str(f.substring(l))),new Sk.builtin.list(j)},_split.co_varnames=[\"pattern\",\"string\",\"maxsplit\",\"flags\"],_split.$defaults=[new Sk.builtin.int_(0),new Sk.builtin.int_(0)],mod.split=new Sk.builtin.func(_split),_findall=function(a,b,c){var d,e,f,g,h,j;if(Sk.builtin.pyCheckArgsLen(\"findall\",arguments.length,2,3),!Sk.builtin.checkString(a))throw new Sk.builtin.TypeError(\"pattern must be a string\");if(!Sk.builtin.checkString(b))throw new Sk.builtin.TypeError(\"string must be a string\");if(void 0===c&&(c=0),!Sk.builtin.checkNumber(c))throw new Sk.builtin.TypeError(\"flags must be a number\");if(d=Sk.ffi.unwrapo(a),e=Sk.ffi.unwrapo(b),d=convert(d),f=getFlags(c),g=new RegExp(d,f),d.match(/\\$/)){var k=new RegExp(/\\n$/);e.match(k)&&(e=e.slice(0,-1))}for(h=[],j;null!=(j=g.exec(e));){if(2>j.length)h.push(new Sk.builtin.str(j[0]));else if(2==j.length)h.push(new Sk.builtin.str(j[1]));else{for(var l=[],m=1;m=a.thematch.v.length)throw new Sk.builtin.IndexError(\"Index out of range: \"+b);return a.thematch.v[b]})},mod.MatchObject=Sk.misceval.buildClass(mod,matchobj,\"MatchObject\",[]),mod._findre=function(res,string){res=res.replace(/([^\\\\]){,(?![^\\[]*\\])/g,\"$1{0,\");var matches,sitem,retval,re=eval(res),patt=/\\n$/,str=Sk.ffi.remapToJs(string);if(matches=str.match(patt)?str.slice(0,-1).match(re):str.match(re),retval=new Sk.builtin.list,null==matches)return retval;for(var i=0;ilst.v.length)?Sk.builtin.none.none$:(d=Sk.misceval.callsimArray(mod.MatchObject,[lst,a,b]),d)},_search.co_varnames=[\"pattern\",\"string\",\"flags\"],_search.$defaults=[new Sk.builtin.int_(0)],mod.search=new Sk.builtin.func(_search),_match=function(a,b,c){var 
d,e;if(Sk.builtin.pyCheckArgsLen(\"match\",arguments.length,2,3),!Sk.builtin.checkString(a))throw new Sk.builtin.TypeError(\"pattern must be a string\");if(!Sk.builtin.checkString(b))throw new Sk.builtin.TypeError(\"string must be a string\");if(void 0===c&&(c=0),!Sk.builtin.checkNumber(c))throw new Sk.builtin.TypeError(\"flags must be a number\");return(pat=Sk.ffi.remapToJs(a),e=\"/^\"+pat.replace(/\\//g,\"\\\\/\")+\"/\",lst=mod._findre(e,b),1>Sk.ffi.remapToJs(lst).length)?Sk.builtin.none.none$:(d=Sk.misceval.callsimArray(mod.MatchObject,[lst,a,b]),d)},_match.co_varnames=[\"pattern\",\"string\",\"flags\"],_match.$defaults=[new Sk.builtin.int_(0)],mod.match=new Sk.builtin.func(_match),regexobj=function(a,b){var c,d,e,f,g,h;b.__init__=new Sk.builtin.func(function(a,b,c){a.re=b,a.flags=void 0===c?0:c}),h=new Sk.builtin.func(function(a){var b=\"re.compile('\"+Sk.ffi.remapToPy(a.re)+\"')\";return Sk.ffi.remapToPy(b.substring(0,212))}),b.__str__=h,b.__repr__=h,c=function(a,b,c){var d=Sk.ffi.remapToJs(a),e=null==b?0:Sk.ffi.remapToJs(b),f=null==c?d.length:Sk.ffi.remapToJs(c);return\"^\"==e&&(e=d.indexOf(\"\\n\")+1),(f==Sk.builtin.none.none$||null===f)&&(f=d.length),Sk.ffi.remapToPy(d.substring(e,f))},d=function(a,b,d,e){Sk.builtin.pyCheckArgsLen(\"search\",arguments.length,2,4);var f=c(b,d,e);return _search(a.re,f,a.flags)},d.co_varnames=[\"self\",\"string\",\"pos\",\"endpos\"],d.$defaults=[new Sk.builtin.int_(0),Sk.builtin.none.none$],b.search=new Sk.builtin.func(d),e=function(a,b,d,e){Sk.builtin.pyCheckArgsLen(\"match\",arguments.length,2,4);var f=c(b,d,e);return _match(a.re,f,a.flags)},e.co_varnames=[\"self\",\"string\",\"pos\",\"endpos\"],e.$defaults=[new Sk.builtin.int_(0),Sk.builtin.none.none$],b.match=new Sk.builtin.func(e),f=function(a,b,c){if(Sk.builtin.pyCheckArgsLen(\"split\",arguments.length,2,3),void 0===c&&(c=0),!Sk.builtin.checkInt(c))throw new Sk.builtin.TypeError(\"maxsplit must be an integer\");return _split(a.re,b,c,a.flags)},f.co_varnames=[\"self\",\"string\",\"maxsplit\"],f.$defaults=[new Sk.builtin.int_(0)],b.split=new Sk.builtin.func(f),g=function(a,b,d,e){Sk.builtin.pyCheckArgsLen(\"findall\",arguments.length,2,4);var f=c(b,d,e);return _findall(a.re,f,a.flags)},g.co_varnames=[\"self\",\"string\",\"pos\",\"endpos\"],g.$defaults=[new Sk.builtin.int_(0),Sk.builtin.none.none$],b.findall=new Sk.builtin.func(g)},mod.RegexObject=Sk.misceval.buildClass(mod,regexobj,\"RegexObject\",[]),mod.compile=new Sk.builtin.func(function(a,b){var c;if(Sk.builtin.pyCheckArgsLen(\"compile\",arguments.length,1,2),!Sk.builtin.checkString(a))throw new Sk.builtin.TypeError(\"pattern must be a string\");if(void 0===b&&(b=0),!Sk.builtin.checkNumber(b))throw new Sk.builtin.TypeError(\"flags must be a number\");return c=Sk.misceval.callsimArray(mod.RegexObject,[a,b]),c}),mod.purge=new Sk.builtin.func(function(){}),mod};","src/lib/repr.py":"raise NotImplementedError(\"repr is not yet implemented in Skulpt\")\n","src/lib/reprlib.py":"\"\"\"Redo the builtin repr() (representation) but with limits on most sizes.\"\"\"\n\n__all__ = [\"Repr\", \"repr\", \"recursive_repr\"]\n\nfrom itertools import islice\nfrom _thread import get_ident\n\ndef recursive_repr(fillvalue='...'):\n 'Decorator to make a repr function return fillvalue for a recursive call'\n\n def decorating_function(user_function):\n repr_running = set()\n\n def wrapper(self):\n key = id(self), get_ident()\n if key in repr_running:\n return fillvalue\n repr_running.add(key)\n try:\n result = user_function(self)\n finally:\n 
repr_running.discard(key)\n return result\n\n # Can't use functools.wraps() here because of bootstrap issues\n wrapper.__module__ = getattr(user_function, '__module__')\n wrapper.__doc__ = getattr(user_function, '__doc__')\n wrapper.__name__ = getattr(user_function, '__name__')\n wrapper.__qualname__ = getattr(user_function, '__qualname__')\n wrapper.__annotations__ = getattr(user_function, '__annotations__', {})\n return wrapper\n\n return decorating_function\n\nclass Repr:\n\n def __init__(self):\n self.maxlevel = 6\n self.maxtuple = 6\n self.maxlist = 6\n self.maxarray = 5\n self.maxdict = 4\n self.maxset = 6\n self.maxfrozenset = 6\n self.maxdeque = 6\n self.maxstring = 30\n self.maxlong = 40\n self.maxother = 30\n\n def repr(self, x):\n return self.repr1(x, self.maxlevel)\n\n def repr1(self, x, level):\n typename = type(x).__name__\n if ' ' in typename:\n parts = typename.split()\n typename = '_'.join(parts)\n if hasattr(self, 'repr_' + typename):\n return getattr(self, 'repr_' + typename)(x, level)\n else:\n return self.repr_instance(x, level)\n\n def _repr_iterable(self, x, level, left, right, maxiter, trail=''):\n n = len(x)\n if level <= 0 and n:\n s = '...'\n else:\n newlevel = level - 1\n repr1 = self.repr1\n pieces = [repr1(elem, newlevel) for elem in islice(x, maxiter)]\n if n > maxiter: pieces.append('...')\n s = ', '.join(pieces)\n if n == 1 and trail: right = trail + right\n return '%s%s%s' % (left, s, right)\n\n def repr_tuple(self, x, level):\n return self._repr_iterable(x, level, '(', ')', self.maxtuple, ',')\n\n def repr_list(self, x, level):\n return self._repr_iterable(x, level, '[', ']', self.maxlist)\n\n def repr_array(self, x, level):\n if not x:\n return \"array('%s')\" % x.typecode\n header = \"array('%s', [\" % x.typecode\n return self._repr_iterable(x, level, header, '])', self.maxarray)\n\n def repr_set(self, x, level):\n if not x:\n return 'set()'\n x = _possibly_sorted(x)\n return self._repr_iterable(x, level, '{', '}', self.maxset)\n\n def repr_frozenset(self, x, level):\n if not x:\n return 'frozenset()'\n x = _possibly_sorted(x)\n return self._repr_iterable(x, level, 'frozenset({', '})',\n self.maxfrozenset)\n\n def repr_deque(self, x, level):\n return self._repr_iterable(x, level, 'deque([', '])', self.maxdeque)\n\n def repr_dict(self, x, level):\n n = len(x)\n if n == 0: return '{}'\n if level <= 0: return '{...}'\n newlevel = level - 1\n repr1 = self.repr1\n pieces = []\n for key in islice(_possibly_sorted(x), self.maxdict):\n keyrepr = repr1(key, newlevel)\n valrepr = repr1(x[key], newlevel)\n pieces.append('%s: %s' % (keyrepr, valrepr))\n if n > self.maxdict: pieces.append('...')\n s = ', '.join(pieces)\n return '{%s}' % (s,)\n\n def repr_str(self, x, level):\n s = original_repr(x[:self.maxstring])\n if len(s) > self.maxstring:\n i = max(0, (self.maxstring-3)//2)\n j = max(0, self.maxstring-3-i)\n s = original_repr(x[:i] + x[len(x)-j:])\n s = s[:i] + '...' + s[len(s)-j:]\n return s\n\n def repr_int(self, x, level):\n s = original_repr(x) # XXX Hope this isn't too slow...\n if len(s) > self.maxlong:\n i = max(0, (self.maxlong-3)//2)\n j = max(0, self.maxlong-3-i)\n s = s[:i] + '...' 
+ s[len(s)-j:]\n return s\n\n def repr_instance(self, x, level):\n try:\n s = original_repr(x)\n # Bugs in x.__repr__() can cause arbitrary\n # exceptions -- then make up something\n except Exception:\n return '<%s instance at %#x>' % (x.__class__.__name__, id(x))\n if len(s) > self.maxother:\n i = max(0, (self.maxother-3)//2)\n j = max(0, self.maxother-3-i)\n s = s[:i] + '...' + s[len(s)-j:]\n return s\n\n\ndef _possibly_sorted(x):\n # Since not all sequences of items can be sorted and comparison\n # functions may raise arbitrary exceptions, return an unsorted\n # sequence in that case.\n try:\n return sorted(x)\n except Exception:\n return list(x)\n\noriginal_repr = repr\naRepr = Repr()\nrepr = aRepr.repr\n","src/lib/requests/__init__.js":"var $builtinmodule=function(){var a={};return a.Response=Sk.misceval.buildClass(a,function(a,b){b.__init__=new Sk.builtin.func(function(a,b){a.data$=b,a.lineList=a.data$.split(\"\\n\"),a.lineList=a.lineList.slice(0,-1);for(var c=0;c\")}),b.__repr__=b.__str__,b.__iter__=new Sk.builtin.func(function(a){var b=a.lineList;return Sk.builtin.makeGenerator(function(){return this.$index>=this.$lines.length?void 0:new Sk.builtin.str(this.$lines[this.$index++])},{$obj:a,$index:0,$lines:b})}),b.read=new Sk.builtin.func(function(a,b){if(a.closed)throw new Sk.builtin.ValueError(\"I/O operation on closed file\");var c=a.data$.length;void 0===b&&(b=c);var d=new Sk.builtin.str(a.data$.substr(a.pos$,b));return a.pos$+=b,a.pos$>=c&&(a.pos$=c),d}),b.readline=new Sk.builtin.func(function(a){var b=\"\";return a.currentLineb||b>=d)throw new Sk.builtin.ValueError(\"Index must have a value between 0 and \"+d);if(!(c instanceof Sk.builtin.int_))throw new Sk.builtin.TypeError(\"Value must be an integer\");c=Sk.ffi.unwrapo(c),-32768>c&&(c=-32768),32767b||b>=d)throw new Sk.builtin.ValueError(\"Index must have a value between 0 and \"+d);if(!(c instanceof Sk.builtin.int_))throw new Sk.builtin.TypeError(\"Value must be an integer\");c=Sk.ffi.unwrapo(c),-32768>c&&(c=-32768),32767b||b>=d)throw new Sk.builtin.ValueError(\"Index must have a value between 0 and \"+d);if(!(c instanceof Sk.builtin.int_))throw new Sk.builtin.TypeError(\"Value must be an integer\");c=Sk.ffi.unwrapo(c),-32768>c&&(c=-32768),32767b||b>=c)throw new Sk.builtin.ValueError(\"Index must have a value between 0 and \"+c);return new Sk.builtin.int_(pythy.Sound.mapFloatTo16BitInt(a._sound.getLeftSample(Sk.ffi.unwrapo(b))))}),getLeftSample:new Sk.builtin.func(function(a,b){var c;if(Sk.builtin.pyCheckArgs(\"getLeftSample\",arguments,2),c=a._sound.getLength(),0>b||b>=c)throw new Sk.builtin.ValueError(\"Index must have a value between 0 and \"+c);return new Sk.builtin.int_(pythy.Sound.mapFloatTo16BitInt(a._sound.getLeftSample(Sk.ffi.unwrapo(b))))}),getRightSample:new Sk.builtin.func(function(a,b){var c;if(Sk.builtin.pyCheckArgs(\"getRightSample\",arguments,2),c=a._sound.getLength(),0>b||b>=c)throw new Sk.builtin.ValueError(\"Index must have a value between 0 and \"+c);return new Sk.builtin.int_(pythy.Sound.mapFloatTo16BitInt(a._sound.getRightSample(Sk.ffi.unwrapo(b))))}),getSampleObjectAt:new Sk.builtin.func(function(a,b){var d;if(Sk.builtin.pyCheckArgs(\"getSampleObjectAt\",arguments,2),d=a._sound.getLength(),0>b||b>=d)throw new Sk.builtin.ValueError(\"Index must have a value between 0 and \"+d);return Sk.misceval.callsim(c,a,b)}),getSamples:new Sk.builtin.func(function(a){var b,d;Sk.builtin.pyCheckArgs(\"getSamples\",arguments,1),b=[],d=a._sound.getLength();for(var e=0;eSk.ffi.unwrapo(a))throw new 
Sk.builtin.ValueError(\"Duration can not be negative\");return d=Sk.ffi.unwrapo(a)*(Sk.ffi.unwrapo(c)||window.pythy.Sound.SAMPLE_RATE),Sk.misceval.callsim(b.Sound,new Sk.builtin.int_(d),c)}),openSoundTool:new Sk.builtin.func(function(a){Sk.builtin.pyCheckArgs(\"openSoundTool\",arguments,1),window.pythy.soundTool.start(a._sound)}),writeSoundTo:new Sk.builtin.func(function(a,b){Sk.builtin.pyCheckArgs(\"writeSoundTo\",arguments,2),a._sound.save(Sk.ffi.unwrapo(b))})}),b};","src/lib/sound/__init__.js":"var $builtinmodule=function(){return{}};","src/lib/sqlite3/__init__.py":"raise NotImplementedError(\"sqlite3 is not yet implemented in Skulpt\")\n","src/lib/sre.py":"raise NotImplementedError(\"sre is not yet implemented in Skulpt\")\n","src/lib/sre_compile.py":"raise NotImplementedError(\"sre_compile is not yet implemented in Skulpt\")\n","src/lib/sre_constants.py":"raise NotImplementedError(\"sre_constants is not yet implemented in Skulpt\")\n","src/lib/sre_parse.py":"raise NotImplementedError(\"sre_parse is not yet implemented in Skulpt\")\n","src/lib/ssl.py":"raise NotImplementedError(\"ssl is not yet implemented in Skulpt\")\n","src/lib/stat.py":"\"\"\"Constants/functions for interpreting results of os.stat() and os.lstat().\n\nSuggested usage: from stat import *\n\"\"\"\n\n# Indices for stat struct members in the tuple returned by os.stat()\n\nST_MODE = 0\nST_INO = 1\nST_DEV = 2\nST_NLINK = 3\nST_UID = 4\nST_GID = 5\nST_SIZE = 6\nST_ATIME = 7\nST_MTIME = 8\nST_CTIME = 9\n\n# Extract bits from the mode\n\ndef S_IMODE(mode):\n \"\"\"Return the portion of the file's mode that can be set by\n os.chmod().\n \"\"\"\n return mode & 0o7777\n\ndef S_IFMT(mode):\n \"\"\"Return the portion of the file's mode that describes the\n file type.\n \"\"\"\n return mode & 0o170000\n\n# Constants used as S_IFMT() for various file types\n# (not all are implemented on all systems)\n\nS_IFDIR = 0o040000 # directory\nS_IFCHR = 0o020000 # character device\nS_IFBLK = 0o060000 # block device\nS_IFREG = 0o100000 # regular file\nS_IFIFO = 0o010000 # fifo (named pipe)\nS_IFLNK = 0o120000 # symbolic link\nS_IFSOCK = 0o140000 # socket file\n\n# Functions to test for each file type\n\ndef S_ISDIR(mode):\n \"\"\"Return True if mode is from a directory.\"\"\"\n return S_IFMT(mode) == S_IFDIR\n\ndef S_ISCHR(mode):\n \"\"\"Return True if mode is from a character special device file.\"\"\"\n return S_IFMT(mode) == S_IFCHR\n\ndef S_ISBLK(mode):\n \"\"\"Return True if mode is from a block special device file.\"\"\"\n return S_IFMT(mode) == S_IFBLK\n\ndef S_ISREG(mode):\n \"\"\"Return True if mode is from a regular file.\"\"\"\n return S_IFMT(mode) == S_IFREG\n\ndef S_ISFIFO(mode):\n \"\"\"Return True if mode is from a FIFO (named pipe).\"\"\"\n return S_IFMT(mode) == S_IFIFO\n\ndef S_ISLNK(mode):\n \"\"\"Return True if mode is from a symbolic link.\"\"\"\n return S_IFMT(mode) == S_IFLNK\n\ndef S_ISSOCK(mode):\n \"\"\"Return True if mode is from a socket.\"\"\"\n return S_IFMT(mode) == S_IFSOCK\n\n# Names for permission bits\n\nS_ISUID = 0o4000 # set UID bit\nS_ISGID = 0o2000 # set GID bit\nS_ENFMT = S_ISGID # file locking enforcement\nS_ISVTX = 0o1000 # sticky bit\nS_IREAD = 0o0400 # Unix V7 synonym for S_IRUSR\nS_IWRITE = 0o0200 # Unix V7 synonym for S_IWUSR\nS_IEXEC = 0o0100 # Unix V7 synonym for S_IXUSR\nS_IRWXU = 0o0700 # mask for owner permissions\nS_IRUSR = 0o0400 # read by owner\nS_IWUSR = 0o0200 # write by owner\nS_IXUSR = 0o0100 # execute by owner\nS_IRWXG = 0o0070 # mask for group permissions\nS_IRGRP = 0o0040 # read by 
group\nS_IWGRP = 0o0020 # write by group\nS_IXGRP = 0o0010 # execute by group\nS_IRWXO = 0o0007 # mask for others (not in group) permissions\nS_IROTH = 0o0004 # read by others\nS_IWOTH = 0o0002 # write by others\nS_IXOTH = 0o0001 # execute by others\n\n# Names for file flags\n\nUF_NODUMP = 0x00000001 # do not dump file\nUF_IMMUTABLE = 0x00000002 # file may not be changed\nUF_APPEND = 0x00000004 # file may only be appended to\nUF_OPAQUE = 0x00000008 # directory is opaque when viewed through a union stack\nUF_NOUNLINK = 0x00000010 # file may not be renamed or deleted\nUF_COMPRESSED = 0x00000020 # OS X: file is hfs-compressed\nUF_HIDDEN = 0x00008000 # OS X: file should not be displayed\nSF_ARCHIVED = 0x00010000 # file may be archived\nSF_IMMUTABLE = 0x00020000 # file may not be changed\nSF_APPEND = 0x00040000 # file may only be appended to\nSF_NOUNLINK = 0x00100000 # file may not be renamed or deleted\nSF_SNAPSHOT = 0x00200000 # file is a snapshot file\n\n\n_filemode_table = (\n ((S_IFLNK, \"l\"),\n (S_IFSOCK, \"s\"), # Must appear before IFREG and IFDIR as IFSOCK == IFREG | IFDIR\n (S_IFREG, \"-\"),\n (S_IFBLK, \"b\"),\n (S_IFDIR, \"d\"),\n (S_IFCHR, \"c\"),\n (S_IFIFO, \"p\")),\n\n ((S_IRUSR, \"r\"),),\n ((S_IWUSR, \"w\"),),\n ((S_IXUSR|S_ISUID, \"s\"),\n (S_ISUID, \"S\"),\n (S_IXUSR, \"x\")),\n\n ((S_IRGRP, \"r\"),),\n ((S_IWGRP, \"w\"),),\n ((S_IXGRP|S_ISGID, \"s\"),\n (S_ISGID, \"S\"),\n (S_IXGRP, \"x\")),\n\n ((S_IROTH, \"r\"),),\n ((S_IWOTH, \"w\"),),\n ((S_IXOTH|S_ISVTX, \"t\"),\n (S_ISVTX, \"T\"),\n (S_IXOTH, \"x\"))\n)\n\ndef filemode(mode):\n \"\"\"Convert a file's mode to a string of the form '-rwxrwxrwx'.\"\"\"\n perm = []\n for table in _filemode_table:\n for bit, char in table:\n if mode & bit == bit:\n perm.append(char)\n break\n else:\n perm.append(\"-\")\n return \"\".join(perm)\n\n\n# Windows FILE_ATTRIBUTE constants for interpreting os.stat()'s\n# \"st_file_attributes\" member\n\nFILE_ATTRIBUTE_ARCHIVE = 32\nFILE_ATTRIBUTE_COMPRESSED = 2048\nFILE_ATTRIBUTE_DEVICE = 64\nFILE_ATTRIBUTE_DIRECTORY = 16\nFILE_ATTRIBUTE_ENCRYPTED = 16384\nFILE_ATTRIBUTE_HIDDEN = 2\nFILE_ATTRIBUTE_INTEGRITY_STREAM = 32768\nFILE_ATTRIBUTE_NORMAL = 128\nFILE_ATTRIBUTE_NOT_CONTENT_INDEXED = 8192\nFILE_ATTRIBUTE_NO_SCRUB_DATA = 131072\nFILE_ATTRIBUTE_OFFLINE = 4096\nFILE_ATTRIBUTE_READONLY = 1\nFILE_ATTRIBUTE_REPARSE_POINT = 1024\nFILE_ATTRIBUTE_SPARSE_FILE = 512\nFILE_ATTRIBUTE_SYSTEM = 4\nFILE_ATTRIBUTE_TEMPORARY = 256\nFILE_ATTRIBUTE_VIRTUAL = 65536\n\n\n# If available, use C implementation\ntry:\n from _stat import *\nexcept ImportError:\n pass","src/lib/statvfs.py":"raise NotImplementedError(\"statvfs is not yet implemented in Skulpt\")\n","src/lib/string.js":"var $builtinmodule=function(){var a={};return a.ascii_lowercase=Sk.builtin.str(\"abcdefghijklmnopqrstuvwxyz\"),a.ascii_uppercase=Sk.builtin.str(\"ABCDEFGHIJKLMNOPQRSTUVWXYZ\"),a.ascii_letters=Sk.builtin.str(a.ascii_lowercase.v+a.ascii_uppercase.v),a.lowercase=Sk.builtin.str(\"abcdefghijklmnopqrstuvwxyz\"),a.uppercase=Sk.builtin.str(\"ABCDEFGHIJKLMNOPQRSTUVWXYZ\"),a.letters=Sk.builtin.str(a.lowercase.v+a.uppercase.v),a.digits=Sk.builtin.str(\"0123456789\"),a.hexdigits=Sk.builtin.str(\"0123456789abcdefABCDEF\"),a.octdigits=Sk.builtin.str(\"01234567\"),a.punctuation=Sk.builtin.str(\"!\\\"#$%&'()*+,-./:;<=>?@[\\\\]^_`{|}~\"),a.whitespace=Sk.builtin.str(\"\\t\\n\\x0B\\f\\r \"),a.printable=Sk.builtin.str(a.digits.v+a.letters.v+a.punctuation.v+\" \\t\\n\\r\\x0B\\f\"),a.split=new Sk.builtin.func(function(a,b,c){return 
Sk.misceval.callsimArray(Sk.builtin.str.prototype.split,[a,b,c])}),a.capitalize=new Sk.builtin.func(function(a){return Sk.misceval.callsimArray(Sk.builtin.str.prototype.capitalize,[a])}),a.join=new Sk.builtin.func(function(a,b){return void 0===b&&(b=Sk.builtin.str(\" \")),Sk.misceval.callsimArray(Sk.builtin.str.prototype.join,[b,a])}),a.capwords=new Sk.builtin.func(function(b,c){if(Sk.builtin.pyCheckArgsLen(\"capwords\",arguments.length,1,2),!Sk.builtin.checkString(b))throw new Sk.builtin.TypeError(\"s must be a string\");if(void 0===c&&(c=Sk.builtin.str(\" \")),!Sk.builtin.checkString(c))throw new Sk.builtin.TypeError(\"sep must be a string\");for(var d=Sk.misceval.callsimArray(a.split,[b,c]),e=[],f=0;f' % type(value).__name__\n\n# --\n\ndef print_exc(limit=None, file=None, chain=True):\n \"\"\"Shorthand for 'print_exception(*sys.exc_info(), limit, file)'.\"\"\"\n print_exception(*sys.exc_info(), limit=limit, file=file, chain=chain)\n\ndef format_exc(limit=None, chain=True):\n \"\"\"Like print_exc() but return a string.\"\"\"\n return \"\".join(format_exception(*sys.exc_info(), limit=limit, chain=chain))\n\ndef print_last(limit=None, file=None, chain=True):\n \"\"\"This is a shorthand for 'print_exception(sys.last_type,\n sys.last_value, sys.last_traceback, limit, file)'.\"\"\"\n if not hasattr(sys, \"last_type\"):\n raise ValueError(\"no last exception\")\n print_exception(sys.last_type, sys.last_value, sys.last_traceback,\n limit, file, chain)\n\n#\n# Printing and Extracting Stacks.\n#\n\ndef print_stack(f=None, limit=None, file=None):\n \"\"\"Print a stack trace from its invocation point.\n\n The optional 'f' argument can be used to specify an alternate\n stack frame at which to start. The optional 'limit' and 'file'\n arguments have the same meaning as for print_exception().\n \"\"\"\n if f is None:\n f = sys._getframe().f_back\n print_list(extract_stack(f, limit=limit), file=file)\n\n\ndef format_stack(f=None, limit=None):\n \"\"\"Shorthand for 'format_list(extract_stack(f, limit))'.\"\"\"\n if f is None:\n f = sys._getframe().f_back\n return format_list(extract_stack(f, limit=limit))\n\n\ndef extract_stack(f=None, limit=None):\n \"\"\"Extract the raw traceback from the current stack frame.\n\n The return value has the same format as for extract_tb(). The\n optional 'f' and 'limit' arguments have the same meaning as for\n print_stack(). 
Each item in the list is a quadruple (filename,\n line number, function name, text), and the entries are in order\n from oldest to newest stack frame.\n \"\"\"\n if f is None:\n f = sys._getframe().f_back\n stack = StackSummary.extract(walk_stack(f), limit=limit)\n stack.reverse()\n return stack\n\n\ndef clear_frames(tb):\n \"Clear all references to local variables in the frames of a traceback.\"\n while tb is not None:\n try:\n tb.tb_frame.clear()\n except RuntimeError:\n # Ignore the exception raised if the frame is still executing.\n pass\n tb = tb.tb_next\n\n\nclass FrameSummary:\n \"\"\"A single frame from a traceback.\n\n - :attr:`filename` The filename for the frame.\n - :attr:`lineno` The line within filename for the frame that was\n active when the frame was captured.\n - :attr:`name` The name of the function or method that was executing\n when the frame was captured.\n - :attr:`line` The text from the linecache module for the\n of code that was running when the frame was captured.\n - :attr:`locals` Either None if locals were not supplied, or a dict\n mapping the name to the repr() of the variable.\n \"\"\"\n\n __slots__ = ('filename', 'lineno', 'name', 'line', 'locals')\n\n def __init__(self, filename, lineno, name, *, lookup_line=True,\n locals=None, line=None):\n \"\"\"Construct a FrameSummary.\n\n :param lookup_line: If True, `linecache` is consulted for the source\n code line. Otherwise, the line will be looked up when first needed.\n :param locals: If supplied the frame locals, which will be captured as\n object representations.\n :param line: If provided, use this instead of looking up the line in\n the linecache.\n \"\"\"\n self.filename = filename\n self.lineno = lineno\n self.name = name\n self._line = line\n #if lookup_line:\n # self.line\n self.locals = {k: repr(v) for k, v in locals.items()} if locals else None\n\n def __eq__(self, other):\n if isinstance(other, FrameSummary):\n return (self.filename == other.filename and\n self.lineno == other.lineno and\n self.name == other.name and\n self.locals == other.locals)\n if isinstance(other, tuple):\n return (self.filename, self.lineno, self.name, self._line) == other\n return NotImplemented\n\n def __getitem__(self, pos):\n return (self.filename, self.lineno, self.name, self._line)[pos]\n\n def __iter__(self):\n return iter([self.filename, self.lineno, self.name, self._line])\n\n def __repr__(self):\n return \"\".format(\n filename=self.filename, lineno=self.lineno, name=self.name)\n\n def __len__(self):\n return 4\n\n #@property\n #def line(self):\n # if self._line is None:\n # self._line = linecache.getline(self.filename, self.lineno).strip()\n # return self._line\n\n\ndef walk_stack(f):\n \"\"\"Walk a stack yielding the frame and line number for each frame.\n\n This will follow f.f_back from the given frame. If no frame is given, the\n current stack is used. Usually used with StackSummary.extract.\n \"\"\"\n if f is None:\n f = sys._getframe().f_back.f_back\n while f is not None:\n yield f, f.f_lineno\n f = f.f_back\n\n\ndef walk_tb(tb):\n \"\"\"Walk a traceback yielding the frame and line number for each frame.\n\n This will follow tb.tb_next (and thus is in the opposite order to\n walk_stack). 
Usually used with StackSummary.extract.\n \"\"\"\n while tb is not None:\n yield tb.tb_frame, tb.tb_lineno\n tb = tb.tb_next\n\n\n_RECURSIVE_CUTOFF = 3 # Also hardcoded in traceback.c.\n\n\nclass StackSummary(list):\n \"\"\"A stack of frames.\"\"\"\n\n @staticmethod\n def extract(frame_gen, limit=None, lookup_lines=True, capture_locals=False):\n \"\"\"Create a StackSummary from a traceback or stack object.\n\n :param frame_gen: A generator that yields (frame, lineno) tuples to\n include in the stack.\n :param limit: None to include all frames or the number of frames to\n include.\n :param lookup_lines: If True, lookup lines for each frame immediately,\n otherwise lookup is deferred until the frame is rendered.\n :param capture_locals: If True, the local variables from each frame will\n be captured as object representations into the FrameSummary.\n \"\"\"\n if limit is None:\n limit = getattr(sys, 'tracebacklimit', None)\n if limit is not None and limit < 0:\n limit = 0\n if limit is not None:\n if limit >= 0:\n frame_gen = list(frame_gen)[:limit]\n else:\n frame_gen = collections.deque(frame_gen, maxlen=-limit)\n\n result = StackSummary()\n fnames = set()\n for f, lineno in frame_gen:\n co = f.f_code\n filename = f.co_filename # co.co_filename\n name = f.co_name # co.co_name\n line = f.f_line\n\n fnames.add(filename)\n # linecache.lazycache(filename, f.f_globals)\n # Must defer line lookups until we have called checkcache.\n if capture_locals:\n f_locals = f.f_locals\n else:\n f_locals = None\n result.append(FrameSummary(\n filename, lineno, name, line=line, lookup_line=False, locals=f_locals))\n # for filename in fnames:\n # linecache.checkcache(filename)\n # If immediate lookup was desired, trigger lookups now.\n # if lookup_lines:\n # for f in result:\n # f.line\n return result\n\n @classmethod\n def from_list(klass, a_list):\n \"\"\"\n Create a StackSummary object from a supplied list of\n FrameSummary objects or old-style list of tuples.\n \"\"\"\n # While doing a fast-path check for isinstance(a_list, StackSummary) is\n # appealing, idlelib.run.cleanup_traceback and other similar code may\n # break this by making arbitrary frames plain tuples, so we need to\n # check on a frame by frame basis.\n result = StackSummary()\n for frame in a_list:\n if isinstance(frame, FrameSummary):\n result.append(frame)\n else:\n filename, lineno, name, line = frame\n result.append(FrameSummary(filename, lineno, name, line=line))\n return result\n\n def format(self):\n \"\"\"Format the stack ready for printing.\n\n Returns a list of strings ready for printing. 
Each string in the\n resulting list corresponds to a single frame from the stack.\n Each string ends in a newline; the strings may contain internal\n newlines as well, for those items with source text lines.\n\n For long sequences of the same frame and line, the first few\n repetitions are shown, followed by a summary line stating the exact\n number of further repetitions.\n \"\"\"\n result = []\n last_file = None\n last_line = None\n last_name = None\n count = 0\n for frame in self:\n if (last_file is None or last_file != frame.filename or\n last_line is None or last_line != frame.lineno or\n last_name is None or last_name != frame.name):\n if count > _RECURSIVE_CUTOFF:\n count -= _RECURSIVE_CUTOFF\n result.append((\n ' [Previous line repeated {count} more '\n 'time{s_count}]\\n'\n ).format(count=count, s_count=\"s\" if count > 1 else \"\"))\n last_file = frame.filename\n last_line = frame.lineno\n last_name = frame.name\n count = 0\n count += 1\n if count > _RECURSIVE_CUTOFF:\n continue\n row = []\n row.append(' File \"{}\", line {}, in {}\\n'.format(\n frame.filename, frame.lineno, frame.name))\n if frame._line:\n row.append(' {}\\n'.format(frame._line.strip()))\n if frame.locals:\n for name, value in sorted(frame.locals.items()):\n row.append(' {name} = {value}\\n'.format(name=name, value=value))\n result.append(''.join(row))\n if count > _RECURSIVE_CUTOFF:\n count -= _RECURSIVE_CUTOFF\n result.append((\n ' [Previous line repeated {count} more '\n 'time{s_count}]\\n'\n ).format(count=count, s_count='s' if count > 1 else ''))\n return result\n\n\nclass TracebackException:\n \"\"\"An exception ready for rendering.\n\n The traceback module captures enough attributes from the original exception\n to this intermediary form to ensure that no references are held, while\n still being able to fully print or format it.\n\n Use `from_exception` to create TracebackException instances from exception\n objects, or the constructor to create TracebackException instances from\n individual components.\n\n - :attr:`__cause__` A TracebackException of the original *__cause__*.\n - :attr:`__context__` A TracebackException of the original *__context__*.\n - :attr:`__suppress_context__` The *__suppress_context__* value from the\n original exception.\n - :attr:`stack` A `StackSummary` representing the traceback.\n - :attr:`exc_type` The class of the original traceback.\n - :attr:`filename` For syntax errors - the filename where the error\n occurred.\n - :attr:`lineno` For syntax errors - the linenumber where the error\n occurred.\n - :attr:`text` For syntax errors - the text where the error\n occurred.\n - :attr:`offset` For syntax errors - the offset into the text where the\n error occurred.\n - :attr:`msg` For syntax errors - the compiler error message.\n \"\"\"\n\n def __init__(self, exc_type, exc_value, exc_traceback, *, limit=None,\n lookup_lines=True, capture_locals=False, _seen=None):\n # NB: we need to accept exc_traceback, exc_value, exc_traceback to\n # permit backwards compat with the existing API, otherwise we\n # need stub thunk objects just to glue it together.\n # Handle loops in __cause__ or __context__.\n if _seen is None:\n _seen = set()\n _seen.add(id(exc_value))\n # Gracefully handle (the way Python 2.4 and earlier did) the case of\n # being called with no type or value (None, None, None).\n if (exc_value and exc_value.__cause__ is not None\n and id(exc_value.__cause__) not in _seen):\n cause = TracebackException(\n type(exc_value.__cause__),\n exc_value.__cause__,\n 
exc_value.__cause__.__traceback__,\n limit=limit,\n lookup_lines=False,\n capture_locals=capture_locals,\n _seen=_seen)\n else:\n cause = None\n if (exc_value and exc_value.__context__ is not None\n and id(exc_value.__context__) not in _seen):\n context = TracebackException(\n type(exc_value.__context__),\n exc_value.__context__,\n exc_value.__context__.__traceback__,\n limit=limit,\n lookup_lines=False,\n capture_locals=capture_locals,\n _seen=_seen)\n else:\n context = None\n self.exc_traceback = exc_traceback\n self.__cause__ = cause\n self.__context__ = context\n self.__suppress_context__ = \\\n exc_value.__suppress_context__ if exc_value else False\n # TODO: locals.\n self.stack = StackSummary.extract(\n walk_tb(exc_traceback), limit=limit, lookup_lines=lookup_lines,\n capture_locals=capture_locals)\n self.exc_type = exc_type\n # Capture now to permit freeing resources: only complication is in the\n # unofficial API _format_final_exc_line\n self._str = _some_str(exc_value)\n if exc_type and issubclass(exc_type, SyntaxError):\n # Handle SyntaxError's specially\n self.filename = exc_value.filename\n self.lineno = str(exc_value.lineno)\n self.text = exc_value.text\n self.offset = exc_value.offset\n self.msg = exc_value.msg\n #if lookup_lines:\n # self._load_lines()\n\n @classmethod\n def from_exception(cls, exc, *args, **kwargs):\n \"\"\"Create a TracebackException from an exception.\"\"\"\n return cls(type(exc), exc, exc.__traceback__, *args, **kwargs)\n\n #def _load_lines(self):\n # \"\"\"Private API. force all lines in the stack to be loaded.\"\"\"\n # for frame in self.stack:\n # frame.line\n # if self.__context__:\n # self.__context__._load_lines()\n # if self.__cause__:\n # self.__cause__._load_lines()\n\n def __eq__(self, other):\n return self.__dict__ == other.__dict__\n\n def __str__(self):\n return self._str\n\n def format_exception_only(self):\n \"\"\"Format the exception part of the traceback.\n\n The return value is a generator of strings, each ending in a newline.\n\n Normally, the generator emits a single string; however, for\n SyntaxError exceptions, it emites several lines that (when\n printed) display detailed information about where the syntax\n error occurred.\n\n The message indicating which exception occurred is always the last\n string in the output.\n \"\"\"\n if self.exc_type is None:\n yield _format_final_exc_line(None, self._str)\n return\n\n stype = self.exc_type.__name__\n if hasattr(self.exc_type, \"__module__\"):\n smod = self.exc_type.__module__\n else:\n smod = \"builtins\"\n if smod not in (\"__main__\", \"builtins\"):\n stype = smod + '.' 
+ stype\n\n if not issubclass(self.exc_type, SyntaxError):\n yield _format_final_exc_line(stype, self._str)\n return\n\n # It was a syntax error; show exactly where the problem was found.\n filename = self.filename or \"\"\n lineno = str(self.lineno) or '?'\n yield ' File \"{}\", line {}\\n'.format(filename, lineno)\n\n badline = self.text\n offset = self.offset\n if badline is not None:\n yield ' {}\\n'.format(badline.strip())\n if offset is not None:\n caretspace = badline.rstrip('\\n')\n offset = min(len(caretspace), offset) - 1\n caretspace = caretspace[:offset].lstrip()\n # non-space whitespace (likes tabs) must be kept for alignment\n caretspace = ((c.isspace() and c or ' ') for c in caretspace)\n yield ' {}^\\n'.format(''.join(caretspace))\n msg = self.msg or \"\"\n yield \"{}: {}\\n\".format(stype, msg)\n\n def format(self, chain=True):\n \"\"\"Format the exception.\n\n If chain is not *True*, *__cause__* and *__context__* will not be formatted.\n\n The return value is a generator of strings, each ending in a newline and\n some containing internal newlines. `print_exception` is a wrapper around\n this method which just prints the lines to a file.\n\n The message indicating which exception occurred is always the last\n string in the output.\n \"\"\"\n if chain:\n if self.__cause__ is not None:\n #yield from self.__cause__.format(chain=chain)\n for g in self.__cause__.format(chain=chain):\n yield g\n yield _cause_message\n elif (self.__context__ is not None and\n not self.__suppress_context__):\n #yield from self.__context__.format(chain=chain)\n for g in self.__cause__.format(chain=chain):\n yield g\n yield _context_message\n if self.exc_traceback is not None:\n yield 'Traceback (most recent call last):\\n'\n #yield from self.stack.format()\n for g in self.stack.format():\n yield g\n #yield from self.format_exception_only()\n for g in self.format_exception_only():\n yield g\n","src/lib/tty.py":"raise NotImplementedError(\"tty is not yet implemented in Skulpt\")\n","src/lib/turtle.js":"var $builtinmodule=function(){\"use strict\";var e=function(){let e,t=Sk.TurtleGraphics&&Sk.TurtleGraphics.target||\"turtle\";for(e=\"string\"==typeof t?document.getElementById(t):\"function\"==typeof t?t():t;e.firstChild;)e.removeChild(e.firstChild);return e}();return e.turtleInstance?e.turtleInstance.reset():e.turtleInstance=function(e){var N=Math.round,G=Math.max,H=Math.sqrt,W=Math.min,V=Math.abs,K=Math.PI,j=Math.atan2,J=Math.sin,X=Math.cos;function t(e){var t=ee.assets,n=\"function\"==typeof t?t(e):t[e];return\"string\"==typeof n?new Promise(function(t,r){var a=new Image;a.onload=function(){ee.assets[e]=this,t(a)},a.onerror=function(){r(new Error(\"Missing asset: \"+n))},a.src=n}):new r(void 0,n)}function r(e,t){this.lastResult=t,this.lastError=e}function n(){this.reset()}function a(){return ce||(ce=new n),ce}function s(){var t=this;for(var r in this._target=_(),this._managers={},this._handlers={mousedown:function(r){t.onEvent(\"mousedown\",r)},mouseup:function(r){t.onEvent(\"mouseup\",r)},mousemove:function(r){t.onEvent(\"mousemove\",r)}},this._handlers)this._target.addEventListener(r,this._handlers[r])}function l(e,t){this._type=e,this._target=t,this._handlers=void 0,u().addManager(e,this)}function i(){a().addTurtle(this),this._screen=c(),this._managers={},this.reset()}function o(){var e,t;this._frames=1,this._delay=void 
0,this._bgcolor=\"none\",this._mode=\"standard\",this._managers={},this._keyLogger={},e=(ee.worldWidth||ee.width||m())/2,t=(ee.worldHeight||ee.height||g())/2,this.setUpWorld(-e,-t,e,t)}function d(){return te||(te=Sk.misceval.callsimArray(ne.Turtle)),te.instance}function _(){return e}function c(){return Z||(Z=new o),Z}function u(){return re||(re=new s),re}function m(){return 0|(Z&&Z._width||ee.width||_().clientWidth||_e.width)}function g(){return 0|(Z&&Z._height||ee.height||_().clientHeight||_e.height)}function p(e,t){var r,n=document.createElement(\"canvas\"),a=m(),s=g(),l=_().firstChild?-s+\"px\":\"0\";return n.width=a,n.height=s,n.style.position=\"relative\",n.style.display=\"block\",n.style.setProperty(\"margin-top\",l),n.style.setProperty(\"z-index\",e),t&&(n.style.display=\"none\"),_().appendChild(n),r=n.getContext(\"2d\"),r.lineCap=\"round\",r.lineJoin=\"round\",h(c(),r),r}function f(){Y&&((window.cancelAnimationFrame||window.mozCancelAnimationFrame)(Y),Y=void 0),Q&&(window.clearTimeout(Q),Q=void 0)}function h(e,t){var r=e.llx,n=e.lly,a=e.urx,s=e.ury,l=e.xScale,i=e.yScale;t&&(v(t),t.restore(),t.save(),t.scale(1/l,1/i),0===n?t.translate(-r,n-(s-n)):0e._bufferSize;)e._undoBuffer.shift();for(r={},t=[\"x\",\"y\",\"angle\",\"radians\",\"color\",\"fill\",\"down\",\"filling\",\"shown\",\"shape\",\"size\"],n=0;na;a++)if(\"number\"==typeof t[a])t[a]=G(0,W(255,parseInt(t[a])));else throw new Sk.builtin.ValueError(\"bad color sequence\");}else for(a=0;3>a;a++)if(\"number\"!=typeof t[a])throw new Sk.builtin.ValueError(\"bad color sequence\");else if(1>=t[a])t[a]=G(0,W(255,parseInt(255*t[a])));else throw new Sk.builtin.ValueError(\"bad color sequence\");\"number\"==typeof t[a]?(t[3]=G(0,W(1,t[a])),t=\"rgba(\"+t.join(\",\")+\")\"):t=\"rgb(\"+t.slice(0,3).join(\",\")+\")\"}else if(\"string\"==typeof t&&!t.match(/\\s*url\\s*\\(/i))t=t.replace(/\\s+/g,\"\");else return\"black\";return t}function O(e,t,r){var n=e._angle||0,a=e._radians||0;return r||(r={}),\"number\"==typeof t&&(e._isRadians?n=a=t%i.RADIANS:e._fullCircle?(n=t%e._fullCircle,a=n/e._fullCircle*i.RADIANS):n=a=0,0>n&&(n+=e._fullCircle,a+=i.RADIANS)),r.angle=n,r.radians=a,r}function E(e,t){return function(){var r=Array.prototype.slice.call(arguments),n=r.map(function(e){return Sk.ffi.remapToPy(e)});return\"undefined\"!=typeof t&&n.unshift(t),Sk.misceval.applyAsync(void 0,e,void 0,void 0,void 0,n).catch(Sk.uncaughtException)}}function F(e,t,n,a){var s,l=n.replace(/^\\$/,\"\"),o=l.replace(/_\\$[a-z]+\\$$/i,\"\"),d=e.prototype[n].length,_=e.prototype[n].minArgs,c=e.prototype[n].co_varnames||[],u=e.prototype[n].returnType,m=e.prototype[n].isSk;void 0===_&&(_=d),s=function(){var e,t,s,l,c,g=Array.prototype.slice.call(arguments,0),p=a?a():g.shift().instance;if(g.length<_||g.length>d)throw c=_===d?\"exactly \"+d:\"between \"+_+\" and \"+d,new Sk.builtin.TypeError(o+\"() takes \"+c+\" positional argument(s) (\"+g.length+\" given)\");for(e=g.length;0<=--e;)void 0!==g[e]&&(g[e]=g[e]instanceof Sk.builtin.func?E(g[e]):g[e]instanceof Sk.builtin.method?E(g[e].im_func,g[e].im_self):g[e]&&g[e].$d instanceof Sk.builtin.dict&&g[e].instance?g[e].instance:Sk.ffi.remapToJs(g[e]));var f=g.slice();for(g=[],e=f.length;0<=e;--e)null!==f[e]&&(g[e]=f[e]);try{t=p[n].apply(p,g)}catch(t){throw window&&window.console&&(window.console.log(\"wrapped method failed\"),window.console.log(t.stack)),t}return t instanceof r&&(t=t.lastResult),t instanceof Promise?(t=t.catch(function(t){throw window&&window.console&&(window.console.log(\"promise 
failed\"),window.console.log(t.stack)),t}),s=new Sk.misceval.Suspension,s.resume=function(){return void 0===l?Sk.builtin.none.none$:Sk.ffi.remapToPy(l)},s.data={type:\"Sk.promise\",promise:t.then(function(e){return l=e,e})},s):void 0===t?Sk.builtin.none.none$:m?t:\"function\"==typeof u?u(t):Sk.ffi.remapToPy(t)},s.co_varnames=c.slice(),s.$defaults=[];for(var g=_;gt;t++)e[t]=Sk.builtin.assk$(e[t]);return 4===e.length&&(e[3]=Sk.builtin.float_(e[3])),new Sk.builtin.tuple(e)},de.TURTLE_LIST=function(e){for(var t=[],r=0;rt&&(t+=this._fullCircle),this.rotate(e,t-e)},e.getManager=function(e){return this._managers[e]||(this._managers[e]=new l(e,this)),this._managers[e]},e.getPaper=function(){return this._paper||(this._paper=p(2))},e.reset=function(){for(var e in this._x=0,this._y=0,this._radians=0,this._angle=0,this._shown=!0,this._down=!0,this._color=\"black\",this._fill=\"black\",this._shape=\"classic\",this._size=1,this._filling=!1,this._undoBuffer=[],this._speed=3,this._computed_speed=5,this._colorMode=1,this._state=void 0,this._managers)this._managers[e].reset();this._isRadians=!1,this._fullCircle=360,this._bufferSize=\"number\"==typeof ee.bufferSize?ee.bufferSize:0,b(this._paper),this._paper=void 0},e.$degrees=function(e){return e=\"number\"==typeof e?V(e):360,this._isRadians=!1,this._angle=e&&this._fullCircle?this._angle/this._fullCircle*e:this._radians=0,this._fullCircle=e,this.addUpdate(void 0,!1,{angle:this._angle,radians:this._radians})},e.$degrees.minArgs=0,e.$degrees.co_varnames=[\"fullcircle\"],e.$degrees.returnType=de.FLOAT,e.$radians=function(){return this._isRadians||(this._isRadians=!0,this._angle=this._radians,this._fullCircle=i.RADIANS),this._angle},e.$radians.returnType=de.FLOAT,e.$position=e.$pos=function(){return[this.$xcor(),this.$ycor()]},e.$position.returnType=function(e){return new Sk.builtin.tuple([Sk.builtin.float_(e[0]),Sk.builtin.float_(e[1])])},e.$towards=function(e,t){var r=z(e,t),n=K+j(this._y-r.y,this._x-r.x),a=n*(this._fullCircle/i.RADIANS);return a},e.$towards.co_varnames=[\"x\",\"y\"],e.$towards.minArgs=1,e.$towards.returnType=de.FLOAT,e.$distance=function(e,t){var r=z(e,t),n=r.x-this._x,a=r.y-this._y;return H(n*n+a*a)},e.$distance.co_varnames=[\"x\",\"y\"],e.$distance.minArgs=1,e.$distance.returnType=de.FLOAT,e.$heading=function(){return 1e-13>V(this._angle)?0:this._angle},e.$heading.returnType=de.FLOAT,e.$xcor=function(){return 1e-13>V(this._x)?0:this._x},e.$xcor.returnType=de.FLOAT,e.$ycor=function(){return 1e-13>V(this._y)?0:this._y},e.$ycor.returnType=de.FLOAT,e.$forward=e.$fd=function(e){return $(this),this.queueMoveBy(this._x,this._y,this._radians,e)},e.$forward.co_varnames=e.$fd.co_varnames=[\"distance\"],e.$undo=function(){w(this)},e.$undobufferentries=function(){return this._undoBuffer.length},e.$setundobuffer=function(e){this._bufferSize=\"number\"==typeof e?W(V(e),1e3):0},e.$setundobuffer.co_varnames=[\"size\"],e.$backward=e.$back=e.$bk=function(e){return $(this),this.queueMoveBy(this._x,this._y,this._radians,-e)},e.$backward.co_varnames=e.$back.co_varnames=e.$bk.co_varnames=[\"distance\"],e.$goto_$rw$=e.$setpos=e.$setposition=function(e,t){var r=z(e,t);return $(this),this.translate(this._x,this._y,r.x-this._x,r.y-this._y,!0)},e.$goto_$rw$.co_varnames=e.$setpos.co_varnames=e.$setposition.co_varnames=[\"x\",\"y\"],e.$goto_$rw$.minArgs=e.$setpos.minArgs=e.$setposition.minArgs=1,e.$setx=function(e){return this.translate(this._x,this._y,e-this._x,0,!0)},e.$setx.co_varnames=[\"x\"],e.$sety=function(e){return 
this.translate(this._x,this._y,0,e-this._y,!0)},e.$sety.co_varnames=[\"y\"],e.$home=function(){var e=this,t=this._angle;return $(this),e.translate(this._x,this._y,-this._x,-this._y,!0).then(function(){return e.queueTurnTo(t,0)}).then(function(){})},e.$right=e.$rt=function(e){return $(this),this.rotate(this._angle,-e)},e.$right.co_varnames=e.$rt.co_varnames=[\"angle\"],e.$left=e.$lt=function(e){return $(this),this.rotate(this._angle,e)},e.$left.co_varnames=e.$lt.co_varnames=[\"angle\"],e.$setheading=e.$seth=function(e){return $(this),this.queueTurnTo(this._angle,e)},e.$setheading.co_varnames=e.$seth.co_varnames=[\"angle\"],e.$circle=function(e,s,o){var d,_,u,m,g,p,f,h,b,v=this,k=this._x,T=this._y,A=this._angle,L={},S=1/c().lineScale,C=!0;for($(this),void 0===s&&(s=v._fullCircle),void 0===o&&(_=V(s)/v._fullCircle,o=1+(0|W(11+V(e*S)/6,59)*_)),u=s/o,m=.5*u,g=2*e*J(u*K/v._fullCircle),0>e?(g=-g,u=-u,m=-m,d=A-s):d=A+s,b=a().willRenderNext()?Promise.resolve():new r,A+=m,p=0;p=e&&(e=m()*e),1>=t&&(t=g()*t),this._width=e,this._height=t,this._xOffset=void 0===r||isNaN(parseInt(r))?0:parseInt(r),this._yOffset=void 0===n||isNaN(parseInt(n))?0:parseInt(n),\"world\"===this._mode?this._setworldcoordinates(this.llx,this.lly,this.urx,this.ury):this._setworldcoordinates(-e/2,-t/2,e/2,t/2)},e.$setup.minArgs=0,e.$setup.co_varnames=[\"width\",\"height\",\"startx\",\"starty\"],e.$register_shape=e.$addshape=function(e,r){return r?void(ie[e]=r):t(e).then(function(t){ie[e]=t})},e.$register_shape.minArgs=1,e.$getshapes=function(){return Object.keys(ie)},e.$tracer=function(e,t){return void 0!==e||void 0!==t?(\"number\"==typeof t&&(this._delay=t,a().refreshInterval(t)),\"number\"==typeof e?(this._frames=e,a().frameBuffer(e)):void 0):this._frames},e.$tracer.co_varnames=[\"frames\",\"delay\"],e.$tracer.minArgs=0,e.$delay=function(e){return void 0===e?void 0===this._delay?le:this._delay:this.$tracer(void 0,e)},e.$delay.co_varnames=[\"delay\"],e._setworldcoordinates=function(e,t,r,n){var s=this,l=a().turtles();return this.setUpWorld(e,t,r,n),this._sprites&&h(this,this._sprites),this._background&&h(this,this._background),this.$clear()},e.$setworldcoordinates=function(e,t,r,n){return this._mode=\"world\",this._setworldcoordinates(e,t,r,n)},e.$setworldcoordinates.co_varnames=[\"llx\",\"lly\",\"urx\",\"ury\"],e.minArgs=4,e.$clear=e.$clearscreen=function(){return this.reset(),this.$reset()},e.$update=function(){return a().update()},e.$reset=e.$resetscreen=function(){var e=this,t=a().turtles();return a().addFrame(function(){h(e,e._sprites),h(e,e._background);for(var r=0;r 15:\n trimActual = True\n actualType = type(actual)\n trimExpected = False\n if len(str(expected)) > 15:\n trimExpected = True\n expectedType = type(expected)\n row = document.createElement('tr')\n err = False\n if res == 'Error':\n err = True\n msg = 'Error: %s' % param\n errorData = document.createElement('td')\n errorData.setAttribute('class','ac-feedback')\n errorData.innerHTML = 'ERROR'\n errorData.setCSS('background-color','#de8e96')\n errorData.setCSS('text-align','center')\n row.appendChild(errorData)\n elif res:\n passed = document.createElement('td')\n passed.setAttribute('class','ac-feedback')\n passed.innerHTML = 'Pass'\n passed.setCSS('background-color','#83d382')\n passed.setCSS('text-align','center')\n row.appendChild(passed)\n self.numPassed += 1\n else:\n fail = document.createElement('td')\n fail.setAttribute('class','ac-feedback')\n fail.innerHTML = 'Fail'\n fail.setCSS('background-color','#de8e96')\n fail.setCSS('text-align','center')\n 
row.appendChild(fail)\n self.numFailed += 1\n\n\n act = document.createElement('td')\n act.setAttribute('class','ac-feedback')\n if trimActual:\n actHTML = str(actual)[:5] + \"...\" + str(actual)[-5:]\n if actualType == str:\n actHTML = repr(actHTML)\n act.innerHTML = actHTML\n else:\n act.innerHTML = repr(actual)\n act.setCSS('text-align','center')\n row.appendChild(act)\n\n expect = document.createElement('td')\n expect.setAttribute('class','ac-feedback')\n\n if trimExpected:\n expectedHTML = str(expected)[:5] + \"...\" + str(expected)[-5:]\n if expectedType == str:\n expectedHTML = repr(expectedHTML)\n expect.innerHTML = expectedHTML\n else:\n expect.innerHTML = repr(expected)\n expect.setCSS('text-align','center')\n row.appendChild(expect)\n inp = document.createElement('td')\n inp.setAttribute('class','ac-feedback')\n\n if err:\n inp.innerHTML = msg\n else:\n inp.innerHTML = param\n inp.setCSS('text-align','center')\n row.appendChild(inp)\n self.resTable.appendChild(row)\n\n\n def showSummary(self):\n pct = self.numPassed / (self.numPassed+self.numFailed) * 100\n pTag = document.createElement('p')\n pTag.innerHTML = \"You passed: \" + str(pct) + \"% of the tests\"\n self.resdiv.appendChild(pTag)\n","src/lib/unittest/mock.py":"def _dot_lookup(thing, comp, import_path):\n try:\n return getattr(thing, comp)\n except AttributeError:\n __import__(import_path)\n return getattr(thing, comp)\n\n\ndef _importer(target):\n components = target.split('.')\n import_path = components.pop(0)\n thing = __import__(import_path)\n for comp in components:\n import_path += \".%s\" % comp\n thing = _dot_lookup(thing, comp, import_path)\n return thing\n\n\ndef rsplit(a_str, sep, howmany):\n broken = a_str.split(sep)\n where = len(broken) - howmany\n if len(broken) == 1:\n return broken\n front, back = broken[:where], broken[where:]\n back.insert(0, sep.join(front))\n return back\n\n\ndef _get_target(target):\n try:\n target, attribute = rsplit(target, '.', 1)\n except (TypeError, ValueError):\n raise TypeError(\"Need a valid target to patch. 
You supplied: %r\" %\n (target,))\n getter = lambda: _importer(target)\n return getter, attribute\n\n\nclass Patch:\n def __init__(self, target, new, return_value):\n self.target = target\n self.new = new\n self.return_value = return_value\n self.getter, self.attribute = _get_target(target)\n self.backup = None\n\n def get_original(self):\n target = self.getter()\n name = self.attribute\n try:\n original = target.__dict__[name]\n except (AttributeError, KeyError):\n original = getattr(target, name, None)\n return original\n\n def start(self):\n self.backup = self.get_original()\n if self.new:\n new_attr = self.new\n else:\n new_attr = self.return_value\n setattr(self.getter(), self.attribute, new_attr)\n\n def stop(self):\n setattr(self.getter(), self.attribute, self.backup)\n if self.target == 'sys.modules':\n self.getter().modules['sys'].modules = self.backup\n\n\ndef pass_through(target, new=None, return_value=None):\n return Patch(target, new, return_value)\n\n\npatch = pass_through\npatch.dict = pass_through","src/lib/unittest/__init__.py":"__author__ = 'bmiller'\n'''\nThis is the start of something that behaves like\nthe unittest module from cpython.\n\n'''\n\nclass TestCase:\n def __init__(self):\n self.numPassed = 0\n self.numFailed = 0\n self.assertPassed = 0\n self.assertFailed = 0\n self.verbosity = 1\n self.tlist = []\n testNames = {}\n for name in dir(self):\n if name[:4] == 'test' and name not in testNames:\n self.tlist.append(getattr(self,name))\n testNames[name]=True\n\n def setUp(self):\n pass\n\n def tearDown(self):\n pass\n \n def cleanName(self,funcName):\n # work around skulpts lack of an __name__\n funcName = str(funcName)\n funcName = funcName[13:]\n funcName = funcName[:funcName.find('<')-3]\n return funcName\n\n def main(self):\n\n for func in self.tlist:\n if self.verbosity > 1:\n print('Running %s' % self.cleanName(func))\n try:\n self.setUp()\n self.assertPassed = 0\n self.assertFailed = 0\n func()\n self.tearDown()\n if self.assertFailed == 0:\n self.numPassed += 1\n else:\n self.numFailed += 1\n print('Tests failed in %s ' % self.cleanName(func))\n except Exception as e:\n self.assertFailed += 1\n self.numFailed += 1\n print('Test threw exception in %s (%s)' % (self.cleanName(func), e))\n self.showSummary()\n\n def assertEqual(self, actual, expected, feedback=\"\"):\n res = actual==expected\n if not res and feedback == \"\":\n feedback = \"Expected %s to equal %s\" % (str(actual),str(expected))\n self.appendResult(res, actual ,expected, feedback)\n\n def assertNotEqual(self, actual, expected, feedback=\"\"):\n res = actual != expected\n if not res and feedback == \"\":\n feedback = \"Expected %s to not equal %s\" % (str(actual),str(expected))\n self.appendResult(res, actual, expected, feedback)\n\n def assertTrue(self,x, feedback=\"\"):\n res = bool(x) is True\n if not res and feedback == \"\":\n feedback = \"Expected %s to be True\" % (str(x))\n self.appendResult(res, x, True, feedback)\n\n def assertFalse(self,x, feedback=\"\"):\n res = not bool(x)\n if not res and feedback == \"\":\n feedback = \"Expected %s to be False\" % (str(x))\n self.appendResult(res, x, False, feedback)\n\n def assertIs(self,a,b, feedback=\"\"):\n res = a is b\n if not res and feedback == \"\":\n feedback = \"Expected %s to be the same object as %s\" % (str(a),str(b))\n self.appendResult(res, a, b, feedback)\n\n def assertIsNot(self,a,b, feedback=\"\"):\n res = a is not b\n if not res and feedback == \"\":\n feedback = \"Expected %s to not be the same object as %s\" % 
(str(a),str(b))\n self.appendResult(res, a, b, feedback)\n\n def assertIsNone(self,x, feedback=\"\"):\n res = x is None\n if not res and feedback == \"\":\n feedback = \"Expected %s to be None\" % (str(x))\n self.appendResult(res, x, None, feedback)\n\n def assertIsNotNone(self,x, feedback=\"\"):\n res = x is not None\n if not res and feedback == \"\":\n feedback = \"Expected %s to not be None\" % (str(x))\n self.appendResult(res, x, None, feedback)\n\n def assertIn(self, a, b, feedback=\"\"):\n res = a in b\n if not res and feedback == \"\":\n feedback = \"Expected %s to be in %s\" % (str(a),str(b))\n self.appendResult(res, a, b, feedback)\n\n def assertNotIn(self, a, b, feedback=\"\"):\n res = a not in b\n if not res and feedback == \"\":\n feedback = \"Expected %s to not be in %s\" % (str(a),str(b))\n self.appendResult(res, a, b, feedback)\n\n def assertIsInstance(self,a,b, feedback=\"\"):\n res = isinstance(a,b)\n if not res and feedback == \"\":\n feedback = \"Expected %s to be an instance of %s\" % (str(a), str(b))\n self.appendResult(res, a, b, feedback)\n\n def assertNotIsInstance(self,a,b, feedback=\"\"):\n res = not isinstance(a,b)\n if not res and feedback == \"\":\n feedback = \"Expected %s to not be an instance of %s\" % (str(a),str(b))\n self.appendResult(res, a, b, feedback)\n\n def assertAlmostEqual(self, a, b, places=7, feedback=\"\", delta=None):\n\n if delta is not None:\n res = abs(a-b) <= delta\n else:\n if places is None:\n places = 7\n res = round(a-b, places) == 0\n \n if not res and feedback == \"\":\n feedback = \"Expected %s to equal %s\" % (str(a),str(b))\n self.appendResult(res, a, b, feedback)\n\n def assertNotAlmostEqual(self, a, b, places=7, feedback=\"\", delta=None):\n\n if delta is not None:\n res = not (a == b) and abs(a - b) > delta\n else:\n if places is None:\n places = 7\n\n res = round(a-b, places) != 0\n\n if not res and feedback == \"\":\n feedback = \"Expected %s to not equal %s\" % (str(a),str(b))\n self.appendResult(res, a, b, feedback)\n\n def assertGreater(self,a,b, feedback=\"\"):\n res = a > b\n if not res and feedback == \"\":\n feedback = \"Expected %s to be greater than %s\" % (str(a),str(b))\n self.appendResult(res, a, b, feedback)\n\n def assertGreaterEqual(self,a,b, feedback=\"\"):\n res = a >= b\n if not res and feedback == \"\":\n feedback = \"Expected %s to be >= %s\" % (str(a),str(b))\n self.appendResult(res, a, b, feedback)\n\n def assertLess(self, a, b, feedback=\"\"):\n res = a < b\n if not res and feedback == \"\":\n feedback = \"Expected %s to be less than %s\" % (str(a),str(b))\n self.appendResult(res, a, b, feedback)\n\n def assertLessEqual(self,a,b, feedback=\"\"):\n res = a <= b\n if not res and feedback == \"\":\n feedback = \"Expected %s to be <= %s\" % (str(a),str(b))\n self.appendResult(res, a, b, feedback)\n\n def appendResult(self,res,actual,expected,feedback):\n if res:\n msg = 'Pass'\n self.assertPassed += 1\n else:\n msg = 'Fail: ' + feedback\n print(msg)\n self.assertFailed += 1\n\n def assertRaises(self, exception, callable=None, *args, **kwds):\n # with is currently not supported hence we just try and catch\n if callable is None:\n raise NotImplementedError(\"assertRaises does currently not support assert contexts\")\n if kwds:\n raise NotImplementedError(\"assertRaises does currently not support **kwds\")\n\n res = False\n actualerror = str(exception())\n try:\n callable(*args)\n except exception as ex:\n res = True\n except Exception as inst:\n actualerror = str(inst)\n print(\"ACT = \", actualerror, 
str(exception()))\n else:\n actualerror = \"No Error\"\n\n self.appendResult(res, str(exception()), actualerror, \"\")\n\n def fail(self, msg=None):\n if msg is None:\n msg = 'Fail'\n else:\n msg = 'Fail: ' + msg\n print(msg)\n self.assertFailed += 1\n\n def showSummary(self):\n pct = self.numPassed / (self.numPassed+self.numFailed) * 100\n print(\"Ran %d tests, passed: %d failed: %d\\n\" % (self.numPassed+self.numFailed,\n self.numPassed, self.numFailed))\n\n\n\ndef main(verbosity=1):\n glob = globals() # globals() still needs work\n for name in glob:\n if type(glob[name]) == type and issubclass(glob[name], TestCase):\n try:\n tc = glob[name]()\n tc.verbosity = verbosity\n tc.main()\n except:\n print(\"Uncaught Error in: \", name)\n","src/lib/urllib/request/__init__.js":"var $builtinmodule=function(){var a={};return a.Response=Sk.misceval.buildClass(a,function(a,b){b.__init__=new Sk.builtin.func(function(a,b){a.data$=b.responseText,a.lineList=a.data$.split(\"\\n\"),a.lineList=a.lineList.slice(0,-1);for(var c=0;c\")}),b.__iter__=new Sk.builtin.func(function(a){var b=a.lineList;return Sk.builtin.makeGenerator(function(){return this.$index>=this.$lines.length?void 0:new Sk.builtin.str(this.$lines[this.$index++])},{$obj:a,$index:0,$lines:b})}),b.read=new Sk.builtin.func(function(a,b){if(a.closed)throw new Sk.builtin.ValueError(\"I/O operation on closed file\");var c=a.data$.length;void 0===b&&(b=c);var d=new Sk.builtin.str(a.data$.substr(a.pos$,b));return a.pos$+=b,a.pos$>=c&&(a.pos$=c),d}),b.readline=new Sk.builtin.func(function(a){var b=\"\";return a.currentLinee;e++)d.elements[4*e+0]=b.elements[4*e+0]*c.elements[0]+b.elements[4*e+1]*c.elements[4]+b.elements[4*e+2]*c.elements[8]+b.elements[4*e+3]*c.elements[12],d.elements[4*e+1]=b.elements[4*e+0]*c.elements[1]+b.elements[4*e+1]*c.elements[5]+b.elements[4*e+2]*c.elements[9]+b.elements[4*e+3]*c.elements[13],d.elements[4*e+2]=b.elements[4*e+0]*c.elements[2]+b.elements[4*e+1]*c.elements[6]+b.elements[4*e+2]*c.elements[10]+b.elements[4*e+3]*c.elements[14],d.elements[4*e+3]=b.elements[4*e+0]*c.elements[3]+b.elements[4*e+1]*c.elements[7]+b.elements[4*e+2]*c.elements[11]+b.elements[4*e+3]*c.elements[15];return b.elements=d.elements,b}),c.lookAt=new Sk.builtin.func(function(b,c,e,f,g,h,i,j,k,l){var m=[c-g,e-h,f-i],n=d(m[0]*m[0]+m[1]*m[1]+m[2]*m[2]);n&&(m[0]/=n,m[1]/=n,m[2]/=n);var o=[j,k,l],p=[];p[0]=o[1]*m[2]-o[2]*m[1],p[1]=-o[0]*m[2]+o[2]*m[0],p[2]=o[0]*m[1]-o[1]*m[0],o[0]=m[1]*p[2]-m[2]*p[1],o[1]=-m[0]*p[2]+m[2]*p[0],o[2]=m[0]*p[1]-m[1]*p[0],n=d(p[0]*p[0]+p[1]*p[1]+p[2]*p[2]),n&&(p[0]/=n,p[1]/=n,p[2]/=n),n=d(o[0]*o[0]+o[1]*o[1]+o[2]*o[2]),n&&(o[0]/=n,o[1]/=n,o[2]/=n);var q=Sk.misceval.callsimArray(a.Mat44);return q.elements[0]=p[0],q.elements[4]=p[1],q.elements[8]=p[2],q.elements[12]=0,q.elements[1]=o[0],q.elements[5]=o[1],q.elements[9]=o[2],q.elements[13]=0,q.elements[2]=m[0],q.elements[6]=m[1],q.elements[10]=m[2],q.elements[14]=0,q.elements[3]=0,q.elements[7]=0,q.elements[11]=0,q.elements[15]=1,q=q.multiply(b),b.elements=q.elements,b.translate(-c,-e,-f),b})},\"Mat44\",[]),a.Mat33=Sk.misceval.buildClass(a,function(a,b){b.__init__=new Sk.builtin.func(function(a){Sk.misceval.callsimArray(b.loadIdentity,[a])}),b.loadIdentity=new Sk.builtin.func(function(a){a.elements=[1,0,0,0,1,0,0,0,1]})},\"Mat33\",[]),a.Vec3=Sk.misceval.buildClass(a,function(b,c){c.__init__=new Sk.builtin.func(function(a,b,c,d){a.x=b,a.y=c,a.z=d}),c.__sub__=new Sk.builtin.func(function(b,c){return 
Sk.misceval.callsimArray(a.Vec3,[b.x-c.x,b.y-c.y,b.z-c.z])})},\"Vec3\",[]),a.cross=new Sk.builtin.func(function(b,c){return Sk.asserts.assert(b instanceof a.Vec3&&c instanceof a.Vec3),Sk.misceval.callsimArray(a.Vec3,[b.y*c.z-b.z*c.y,b.z*c.x-b.x*c.z,b.x*c.y-b.y*c.x])}),a};","src/lib/webgl/matrix4.js":"var $builtinmodule=function(){var a=Math.PI,b={},c=new Float32Array(3),d=new Float32Array(3),e=new Float32Array(3),f=new Float32Array(4),g=new Float32Array(4),h=new Float32Array(4),i=new Float32Array(16),j=new Float32Array(16),k=new Float32Array(16),l=function(b,c){for(var a=Math.sqrt,d=0,e=c.length,f=0;fe;++e)for(var f=0;4>f;++f)c[4*e+f]=d[4*f+e];return c}),b};","src/lib/webgl/models.js":"var $builtinmodule=function(a){var c={},d=function(a,c){var d=c||gl.ARRAY_BUFFER,e=gl.createBuffer();if(this.target=d,this.buf=e,this.set(a),this.numComponents_=a.numComponents,this.numElements_=a.numElements,this.totalComponents_=this.numComponents_*this.numElements_,a.buffer instanceof Float32Array)this.type_=gl.FLOAT;else if(a.buffer instanceof Uint8Array)this.type_=gl.UNSIGNED_BYTE;else if(a.buffer instanceof Int8Array)this.type_=gl._BYTE;else if(a.buffer instanceof Uint16Array)this.type_=gl.UNSIGNED_SHORT;else if(a.buffer instanceof Int16Array)this.type_=gl.SHORT;else throw\"unhandled type:\"+typeof a.buffer};return d.prototype.set=function(a){gl.bindBuffer(this.target,this.buf),gl.bufferData(this.target,a.buffer,gl.STATIC_DRAW)},d.prototype.type=function(){return this.type_},d.prototype.numComponents=function(){return this.numComponents_},d.prototype.numElements=function(){return this.numElements_},d.prototype.totalComponents=function(){return this.totalComponents_},d.prototype.buffer=function(){return this.buf},d.prototype.stride=function(){return 0},d.prototype.offset=function(){return 0},c.Model=Sk.misceval.buildClass(c,function(c,e){e.__init__=new Sk.builtin.func(function(c,e,f,g){c.buffers={};var h=function(a,e){var f=\"indices\"==a?gl.ELEMENT_ARRAY_BUFFER:gl.ARRAY_BUFFER;b=c.buffers[a],b?b.set(e):b=new d(e,f),c.buffers[a]=b};for(a in f)h(a,f[a]);var i={},j=0;for(var k in g)i[k]=j++;c.mode=gl.TRIANGLES,c.textures=g.v,c.textureUnits=i,c.shader=e}),e.drawPrep=new Sk.builtin.func(function(a,c){var d=a.shader,e=a.buffers,f=a.textures;for(var g in c=Sk.ffi.remapToJs(c),Sk.misceval.callsimArray(d.use,[d]),e){var h=e[g];if(\"indices\"==g)gl.bindBuffer(gl.ELEMENT_ARRAY_BUFFER,h.buffer());else{var i=d.attrib[g];i&&i(h)}}for(var j in f){var k=a.textureUnits[j];d.setUniform$impl(d,textuer,k),f[j].bindToUnit(k)}for(var l in c)d.setUniform$impl(d,l,c[l])}),e.draw=new Sk.builtin.func(function(a,c,d){var e=a.shader;for(uniform in c=Sk.ffi.remapToJs(c),c)e.setUniform$impl(e,uniform,c[uniform]);if(d)for(var f in d){var g=a.textureUnits[f];e.setUniform$impl(e,f,g),d[f].bindToUnit(g)}var h=a.buffers;gl.drawElements(a.mode,h.indices.totalComponents(),gl.UNSIGNED_SHORT,0)})},\"Model\",[]),c};","src/lib/webgl/primitives.js":"var $builtinmodule=function(){var a={},b=function(a,b,c){c=c||\"Float32Array\";var d=window[c];b.length?(this.buffer=new d(b),b=this.buffer.length/a,this.cursor=b):(this.buffer=new d(a*b),this.cursor=0),this.numComponents=a,this.numElements=b,this.type=c};return b.prototype.stride=function(){return 0},b.prototype.offset=function(){return 0},b.prototype.getElement=function(a){for(var b=a*this.numComponents,c=[],d=0;do;++o){c=d[o];for(var p=0;4>p;++p){var q=g[c[p]],r=h[o],s=i[p];k.push(q),l.push(r),m.push(s)}var 
t=4*o;n.push([t+0,t+1,t+2]),n.push([t+0,t+2,t+3])}return{position:k,normal:l,texCoord:m,indices:n}}),a};","src/lib/webgl/__init__.js":"var $builtinmodule=function(){var a={},c=function(a){return\"\"},d=\"This page requires a browser that supports WebGL.
Click here to upgrade your browser.\",e=function(a){for(var b=[\"webgl\",\"experimental-webgl\",\"webkit-3d\",\"moz-webgl\"],c=null,d=0;dClick here for more information.\"):c(d)}return h};return a.Context=Sk.misceval.buildClass(a,function(a,b){b.__init__=new Sk.builtin.func(function(a,b){var c=document.getElementById(b.v),d=f(b.v,c);if(!d)throw new Error(\"Your browser does not appear to support WebGL.\");for(var e in a.gl=d,d.__proto__)if(\"number\"==typeof d.__proto__[e])Sk.abstr.objectSetItem(a.$d,new Sk.builtin.str(e),d.__proto__[e]);else if(\"function\"==typeof d.__proto__[e])switch(e){case\"bufferData\":break;case\"clearColor\":break;case\"drawArrays\":break;case\"getAttribLocation\":break;case\"getUniformLocation\":break;case\"shaderSource\":break;case\"uniformMatrix4fv\":break;case\"vertexAttribPointer\":break;case\"viewport\":break;default:(function(b){Sk.abstr.objectSetItem(a.$d,new Sk.builtin.str(e),new Sk.builtin.func(function(){var a=d.__proto__[b];return a.apply(d,arguments)}))})(e);}d.clearColor(100/255,149/255,237/255,1),d.clear(d.COLOR_BUFFER_BIT)}),b.tp$getattr=Sk.builtin.object.prototype.GenericGetAttr,b.bufferData=new Sk.builtin.func(function(a,b,c,d){a.gl.bufferData(b,c.v,d)}),b.clearColor=new Sk.builtin.func(function(a,b,c,d,e){a.gl.clearColor(Sk.builtin.asnum$(b),Sk.builtin.asnum$(c),Sk.builtin.asnum$(d),Sk.builtin.asnum$(e))}),b.getAttribLocation=new Sk.builtin.func(function(a,b,c){return a.gl.getAttribLocation(b,c.v)}),b.getUniformLocation=new Sk.builtin.func(function(a,b,c){return a.gl.getUniformLocation(b,c.v)}),b.shaderSource=new Sk.builtin.func(function(a,b,c){a.gl.shaderSource(b,c.v)}),b.drawArrays=new Sk.builtin.func(function(a,b,c,d){a.gl.drawArrays(Sk.builtin.asnum$(b),Sk.builtin.asnum$(c),Sk.builtin.asnum$(d))}),b.vertexAttribPointer=new Sk.builtin.func(function(a,b,c,d,e,f,g){a.gl.vertexAttribPointer(b,Sk.builtin.asnum$(c),Sk.builtin.asnum$(d),e,Sk.builtin.asnum$(f),Sk.builtin.asnum$(g))}),b.viewport=new Sk.builtin.func(function(a,b,c,d,e){a.gl.viewport(Sk.builtin.asnum$(b),Sk.builtin.asnum$(c),Sk.builtin.asnum$(d),Sk.builtin.asnum$(e))}),b.uniformMatrix4fv=new Sk.builtin.func(function(a,b,c,d){a.gl.uniformMatrix4fv(Sk.builtin.asnum$(b),c,d.v)}),b.setDrawFunc=new Sk.builtin.func(function(a,b){var c=new Date().getTime(),d=setInterval(function(){Sk.misceval.callsimArray(b,[a,new Date().getTime()-c])},1e3/60)})},\"Context\",[]),a.Float32Array=Sk.misceval.buildClass(a,function(a,b){b.__init__=new Sk.builtin.func(function(a,b){a.v=\"number\"==typeof b?new Float32Array(b):new Float32Array(Sk.ffi.remapToJs(b))}),b.__repr__=new Sk.builtin.func(function(a){for(var b=[],c=0;c