diff --git a/src/lib/cs1014/__init__.py b/src/lib/cs1014/__init__.py
deleted file mode 100644
index e69de29bb2..0000000000
diff --git a/src/lib/cs1014/dictionaries.py b/src/lib/cs1014/dictionaries.py
deleted file mode 100644
index 705cd43994..0000000000
--- a/src/lib/cs1014/dictionaries.py
+++ /dev/null
@@ -1,782 +0,0 @@
-from pedal.report.imperative import gently_r, explain_r
-from pedal.cait.cait_api import *
-from pedal.mistakes.instructor_append import app_assign
-
-
-# dict_acc_group
-def dict_acc_group(all_keys, unused_keys, used_keys):
-    print_dict_key(all_keys)
-    var_instead_of_key(all_keys)
-    parens_in_dict(all_keys)
-    missing_key(used_keys)
-    str_list(all_keys)
-    dict_parens_brack()
-    comma_dict_acc()
-    var_key(all_keys)
-    miss_dict_acc()
-    comp_in_dict_acc()
-    key_comp(all_keys)
-    col_dict()
-    wrong_keys(unused_keys)
-
-
-# dict_list_group
-def dict_list_group(all_keys):
-    list_str_dict(all_keys)
-    list_var_dict_acc()
-    list_str_as_list_var(all_keys)
-    fetch_acc_dict(all_keys)
-    list_as_dict()
-    iter_as_key(all_keys)
-    iter_prop_dict_acc()
-
-    append_and_sum()
-
-    dict_out_of_loop(all_keys)
-    dict_access_not_in_loop()
-    no_dict_in_loop()
-    app_assign()
-
-
-# dict_decision
-def dict_decision_group(all_keys, c_value, num_slices):
-    func_filter(all_keys)
-    filt_key(c_value, num_slices)
-    compare_key(c_value)
-    str_equality()
-    fetch_acc_dict([c_value])
-
-
-# dict_plot
-def dict_plot_group():
-    show_args()
-    dict_plot()
-
-
-# dict_chain
-def dict_chain_group(key_sets):
-    for key_set in key_sets:
-        key_order(key_set)
-        key_order_unchained(key_set)
-
-
-def var_check(expr, keys=None):
-    """
-
-    :param expr: Expression to be evaluated
-    :type expr: CaitNode
-    :param keys: List of keys
-    :type keys: list of Str
-    :return: Key value if expression was a name node assigned a key value or if the value is a string key value,
-    otherwise returns False
-    :rtype: bool/Str
-    """
-    if expr.is_ast('Str') and (keys is None or expr.value in keys):
-        return expr.value
-    elif expr.is_ast("Name"):
-        matches = find_matches("{} = __key__".format(expr.value))  # TODO: Relies on .value returning id for Name nodes
-        for match in matches:
-            __key__ = match["__key__"]
-            if __key__.is_ast('Str') and (keys is None or __key__.value in keys):
-                return __key__.value
-    return False
-
-
-def list_dict_indices(expr):
-    """
-    Takes the first key slice of a dictionary and returns a list of all the slices.
- :param expr: a single key slice value at the one level of slicing - :type expr: CaitNode - :return: A list of index ast nodes (each slice), .value should get the ast_node unit that was used for the slice - :rtype: CaitNode(Index) - """ - return expr.parent.parent.parent.find_all('Index') # TODO: Relies on AST Structure - - -def uncover_type(name, tifa_type): - """ - - :param name: A Name Ast Node whose type we are looking through - :type name: CaitNode (Name) - :param tifa_type: The data type of the item - :type tifa_type: str - :return: the data_state object representing when name was of the specified type - :rtype: Tifa State or None - """ - state = name.get_data_state() - if name.was_type(tifa_type) and state: - while state and str(state.type) != tifa_type: - state = state.trace[0] - return state - return None - - -def dict_detect(expr): - if expr.find_match("_var_[__expr__]", use_previous=False): - return expr - elif expr.is_ast("Name"): - # TODO: This match is buggy because in the case slicing, the dictionary dives deeper instead of making siblings - matches = find_matches("{} = __expr__".format(expr.id)) - submatch = None - for match in matches: - __expr__ = match["__expr__"] - submatch = __expr__.find_match("__expr__[__expr2__]", use_previous=False) - if submatch: - return __expr__ - # TODO: Rework to chase down all indirect accesses part of the same chain as a string (and then CaitNode) - # fall through return None - return None - - -# dict_hard_codes -def dict_hard_codes_group(print_vals, list_vals): - hard_coding(print_vals) - hard_coded_list(list_vals) - - -# dict_hard_codes -def hard_coding(val_list): - message = ("Please show code that makes the computer extract " - "the value from the dictionary.") - code = "hard_code" - tldr = "Printing raw value" - # Pattern 1 possibility - matches = find_matches("print(__exp__)") - for match in matches: - __exp__ = match["__exp__"] - value = __exp__.value - if value in val_list: - return explain_r(message, code, label=tldr) - - # Pattern 2 possibility - matches = find_matches("__exp__\n" - "print(_var_)") - for match in matches: - __exp__ = match["__exp__"] - _var_ = match["_var_"] - submatches = __exp__.find_matches("_var_ = __exp2__") - for submatch in submatches: - __exp2__ = submatch["__exp2__"] - value = __exp2__.value - if value in val_list: - return explain_r(message, code, label=tldr) - return False - - -# dict_acc_group -def print_dict_key(keys): - message = ('You\'ve printed the dictionary key "{}" instead of using an extracted value and ' - 'printing it. Use the Dictionary access syntax to print the value associated with a key') - code = "dict_k_print" - tldr = "Printing key, not value" - matches = find_matches("print(__str__)") - matches += find_matches("print([__str__])") - - for match in matches: - __str__ = match["__str__"] - key = var_check(__str__, keys) - if key: - return explain_r(message.format(key), code, label=tldr) - return False - - -# dict_acc_group -def var_instead_of_key(keys): - message = ("It looks like you are trying to use ({}) as a dictionary key. 
" - "Use the dictionary access syntax to get values from a dictionary") - code = "var_as_k" - tldr = "Using Variable instead of key" - matches = find_matches("_var_") - matches += find_matches("[_var_]") - for match in matches: - _var_ = match["_var_"] - if _var_.id in keys: - submatch = find_match("_dict_['{}']".format(_var_.id)) - submatch2 = find_match("{} = ___".format(_var_.id)) - if submatch is None and submatch2 is None: - # If we don't find a dictionary access using this key and - # we don't see that this variable is assigned to a value... - return explain_r(message.format(_var_.id), code, label=tldr) - return False - - -# dict_acc_group -def parens_in_dict(keys): - """ - Checks fr the mistsake of using parenthesis as a dictionary access - :param keys: List of keys - :type keys: list of Str - :return: Feedback String - :rtype: Str - """ - message = ('It seems like you are having trouble with dictionary syntax. The dictionary key "{}"' - "should use brackets.") - code = "par_dict" - tldr = "Not Using Dictionary Brackets" - matches = find_matches("_var_(__str__)") - for match in matches: - __str__ = match['__str__'] - _var_ = match['_var_'] - key = var_check(__str__, keys) - if key and data_state(_var_.id): - return explain_r(message.format(key), code, label=tldr) - return False - - -# dict_list_group -def list_as_dict(): - message = ("The list of Dictionaries {} is not itself a dictionary. " - "To access key-value pairs of the dictionaries in the list, " - "you need to access each dictionary in the list one at a time.") - code = "list_dict" - tldr = "List is not a dictionary" - matches = find_matches("_list_[__exp__]") - for match in matches: - _list_ = match['_list_'] - type_check = uncover_type(_list_, "ListType") - if type_check and str(type_check.type.subtype) == "DictType": - return explain_r(message.format(_list_.id), code, label=tldr) - return False - - -# dict_list_group -def dict_out_of_loop(keys): - message = ("Remember that a list of dictionaries, like {}, " - "is still a list of individual items. Each dictionary needs to be accessed with " - "the appropriate key-value pair one at a time.") - code = "dict_out_loop" - tldr = "Dictionary Access Outside of Loop" - matches = find_matches("__exp__\n" - "for ___ in _var_:\n" - " pass") - matches += find_matches("for ___ in _var_:\n" - " pass\n" - "__exp__\n") - for match in matches: - __exp__ = match['__exp__'] - _var_ = match['_var_'] - submatches = __exp__.find_matches("{var}[__str__]".format(var=_var_.id)) - for submatch in submatches: - __str__ = submatch['__str__'] - if __str__.is_ast("Str") and __str__.value in keys: - return explain_r(message.format(_var_.id), code, label=tldr) - return False - - -# dict_acc_group -def wrong_keys(unused_keys): - message = 'This problem does not require the key "{}".\n' - code = "unused_key" - tldr = "Unnecessary Key Usage" - - matches = find_matches("_var_[__str__]") - for match in matches: - __str__ = match["__str__"] - indices = list_dict_indices(__str__) - for index in indices: - __str__ = index.value - key = var_check(__str__, unused_keys) - if key: - return explain_r(message.format(key), code, label=tldr) - return False - - -# dict_list_group -def dict_access_not_in_loop(): - message = ("You haven't used the dictionary access syntax in a for loop. " - "Remember that a list of dictionaries is still a list of individual items. 
" - "Each dictionary needs to be accessed with the appropriate key-value pair one at a time.") - code = "dict_acc_loop" - tldr = "Dictionary access not in loop" - - matches = find_matches("for ___ in ___:\n" - " __exp__") - for match in matches: - submatches = match["__exp__"].find_matches("_var_[__str__]") - if submatches: - return False - return explain_r(message, code, label=tldr) - - -def hard_coded_list(val_list): - message = ("In later abstractions, it's not possible to view the values of a specific key in a list." - "You should use a dictionary key-value pair to access values in the list of dictionaries.") - code = "hard_list" - tldr = "Don't use raw list" - matches = find_matches("[__exp__]") - for match in matches: - __exp__ = match['__exp__'].parent - if __exp__.ast_name == "List": - try: - vals = sum([x.value for x in __exp__.elts]) - if sum(val_list) == vals: - return explain_r(message, code, label=tldr) - except TypeError: - pass # This should be the only error - return False - - -# dict_list_group -def iter_as_key(keys): - message = ("It looks like you are using the iteration variable {}" - " to access a value of a specific key in a dictionary. " - "To access a key-value from a list of dictionaries, use ") - code = "iter_key" - tldr = "Iteration variable is not key" - matches = find_matches("for _var_ in ___:\n" - " pass") - for match in matches: - _var_ = match['_var_'] - submatches = find_matches("_var2_[__str__]") - missing = True - for submatch in submatches: - __str__ = submatch["__str__"] - if __str__.is_ast("Str") and __str__.value == _var_.id: - missing = False - break - if missing and _var_.id in keys: - return explain_r(message.format(_var_.id), code, label=tldr) - return False - - -# dict_list_group -def list_str_as_list_var(keys): - message = ("The list variable in an iteration can only take lists. " - "To grab individual values in a list of dictionaries, " - "you need to use the appropriate key for each dictionary.") - code = "list_var_dict" - tldr = "List variable cannot filter" - matches = find_matches("for ___ in [__str__]:\n" - " pass") - for match in matches: - __str__ = match["__str__"] - if __str__.is_ast("Str") and __str__.value in keys: - return explain_r(message, code, label=tldr) - return False - - -# dict_list_group -def append_and_sum(): - message = ("It looks like you're trying to build a list and " - "then calculate a value. While this will get you a " - "correct answer, you can calculate the value directly instead of first building a list.") - code = "app_sum" - tldr = "Unnecessary append and sum" - matches = find_match("for ___ in ___:\n" - " _var_.append()\n" - "for ___ in _var_:\n" - " ___ = ___ + ___") - if matches: - return explain_r(message, code, label=tldr) - return False - - -# dict_list_group -def iter_prop_dict_acc(): - message = ("Improper usage of iteration variable." - "The for statement gives the iteration variable a value, " - "in this case, a dictionary. 
That dictionary can only be accessed in the body of the iteration.") - code = "iter_dict_acc" - tldr = "Iteration variable only initializes" - match = find_match("for _var_[__str__] in ___:\n" - " pass") - if match: - return explain_r(message, code, label=tldr) - return False - - -# dict_list_group -def list_str_dict(keys): - message = ("When using dictionaries with iteration, the list cannot just be a key " - 'value like "{}", it must be the list of dictionaries.') - code = "list_str" - tldr = "List variable is string" - matches = find_matches("for ___ in __str__:\n" - " pass") - for match in matches: - __str__ = match['__str__'] - if __str__.is_ast("Str") and __str__.value in keys: - return explain_r(message.format(__str__.value), code, label=tldr) - return False - - -# dict_acc_group -def missing_key(keys): - """ - Checks if student is missing a key - - TODO: Should be good if run AFTER the var_instead_of_key check, although it doesn't appear to catch a key that's - been assigned as the value of an unused variable. - :param keys: list of keys - :type keys: list of Str - :return: Feedback String - :rtype: Str - """ - message = "You seem to be missing the following dictionary key(s):" - code = "miss_key" - tldr = "Missing necessary keys" - key_list = "" - first = False - for key in keys: - matches = find_matches("\"{}\"".format(key)) - if not matches: - if not first: - key_list += ", " - key_list += '
  • "' + key + '"
  • '
-    if key_list != "":
-        return explain_r(message.format(key_list), code, label=tldr)
-    return False
-
-
-def blank_key(keys):
-    message = "You seem to be missing the following dictionary keys:"
-    code = "blank_key"
-    tldr = "Missing Key"
-    key_list = ""
-
-    first = False
-    for key in keys:
-        if not find_match("_var_['{}']".format(key)):
-            if not first:
-                key_list += ", "
-            key_list += '
  • "' + key + '"
  • ' - - if key_list != "": - return explain_r(message.format(key_list), code, label=tldr) - - -# dict_acc_group -def dict_parens_brack(): - message = ("It looks like you are trying to dictionary access {}. " - "The dictionary access syntax does not require parenthesis.") - code = "dict_parbrack" - tldr = "Improper dictionary access" - matches = find_matches("_var_([__str1__][__str2__])") - matches += find_matches("_var_([__str1__])") - for match in matches: - _var_ = match['_var_'] - __str1__ = match["__str1__"] - __str2__ = __str1__ - try: - __str2__ = match["__str2__"] - except KeyError: - pass - if __str1__.is_ast("Str") and __str2__.is_ast("Str") and data_state(_var_.id): - return explain_r(message.format(_var_.id), code, label=tldr) - return False - - -# dict_acc_group -def comma_dict_acc(): - message = ("It looks like you are trying to dictionary access {}. " - "Unlike with initializing dictionaries, keys don't need to be separated with commas " - "when accessing dictionary contents.") - code = "comma_dict" - tldr = "Improper dictionary access" - matches = find_matches("__exp__,[__str2__]") - for match in matches: - submatch = match['__exp__'].find_match("_dict_[__str1__]") - if submatch: - return explain_r(message.format(submatch['_dict_'].id), code, label=tldr) - return False - - -# dict_list_group -def no_dict_in_loop(): - message = "When working with a list of dictionaries, you need to use a dictionary access in your iteration." - code = "no_dict_loop" - tldr = "Missing dictionary access loop" - - matches = find_matches("for _item_ in _list_:\n" - " __expr__") - for match in matches: - _item_ = match['_item_'] - submatches = match['__expr__'].find_matches("_item_[__str__]") - for submatch in submatches: - key = var_check(submatch["__str__"]) - if key: - return False - return explain_r(message, code, label=tldr) - - -# dict_decision -def func_filter(keys): - message = "Please do not modify the function call to retrieve the data." - code = "func_filt" - tldr = "Attempting to filter using fetch" - matches = find_matches("_var_.get_weather(__str__)") - for match in matches: - __str__ = match["__str__"] - if __str__.value in keys: # TODO: Relies on .value returning id for Name nodes - return explain_r(message, code, label=tldr) - return False - - -# dict_acc_group -def str_list(keys): - message = ('If you are trying to use a string such as "{}" as a dictionary key, ' - 'it needs to be prefaced with a dictionary') - code = "str_list" - tldr = "String list used instead of Dictionary" - - for key in keys: - if find_match("['{}']".format(key)): - return explain_r(message.format(key), code, label=tldr) - return False - - -# dict_list_group -def list_var_dict_acc(): - message = ("The for statement only specifies a list target, in this case, a list of dictionaries. It does not " - "operate on the entire list. Keys should be used on the individual dictionaries of the list.") - code = "l_var_dacc" - tldr = "List variable cannot be dictionary accessed" - - matches = find_matches("for ___ in _var_[__str__]:\n" - " pass") - if matches: - return explain_r(message, code, label=tldr) - return False - - -# dict_acc_group -def key_comp(keys): - message = ('The strings "{}" and "{}" are keys. ' - 'Dictionary keys do not need to be compared to anything as they ' - 'are not filtering data. 
Dictionary keys are only used to access existing data.') - code = "key_comp" - tldr = "Comparing Keys" - """ - matches = find_matches("for _var_ in ___:\n" - " if _var_[__str1__] == __str2__:\n" - " pass") - """ - matches = find_matches("for _var_ in ___:\n" - " if __expr1__ == __expr2__:\n" - " pass") - for match in matches: - __str1__ = match["__expr1__"] - __str2__ = match["__expr2__"] - submatch1 = dict_detect(__str1__) - submatch2 = dict_detect(__str2__) - # __str1__ = match["__str1__"] - if submatch1: - __str1__ = submatch1.find_match("_var_[__str1__]", use_previous=False)["__str1__"] - elif submatch2: - __str2__ = submatch2.find_match("_var_[__str2__]", use_previous=False)["__str2__"] - if submatch1 or submatch2: - value1 = var_check(__str1__, keys) - value2 = var_check(__str2__, keys) - if value1 and value2: - return explain_r(message.format(__str1__.value, __str2__.value), code, label=tldr) - return False - - -# dict_acc_group -def col_dict(): - message = "When using multiple keys, each key should have it's own set of brackets." - code = "col_dict" - tldr = "Improper Dictionary Access" - - matches = find_matches("_var_[__str1__: __str2__]") - if matches: - return explain_r(message, code, label=tldr) - return False - - -# dict_acc_group -def var_key(keys): - # TODO: Could use this method for other methods to check if the code needs to use the value of the variable. - # In other words, if we have a variable in place of a key AND this test fails, it means that they have an - # initialized variable whose assigned value we should check as we are able to (statically). - message = ("It looks like you are trying to use {} as a key. Dictionary keys are string values. " - "Variable names don't have a meaning to a computer.") - code = "var_key" - tldr = "Variables are not keys" - - matches = find_matches("_var_[_key_]") - for match in matches: - _key_ = match['_key_'] - if _key_.id in keys and not _key_.was_type("StrType"): - return explain_r(message.format(_key_.id), code, label=tldr) - return False - - -# dict_plot -def key_order(keys): - # TODO: Is it possible to run this test after confirming (through other tests) that there are no unused keys and - # that all keys used are the correct keys, such that the feedback message can explicitly address JUST the case of - # wrong order? - message = "It looks like you aren't using the correct keys, or the correct key order. Double check your data map." - code = "key_order_c" - tldr = "Wrong key order" - - construct = None - # Assemble chain of dictionary slicing - find_chain = "_var_" - for a_slice in range(len(keys)): - find_chain += "[__str{}__]".format(a_slice) - # If we find a chain of dictionary accesses - if find_match(find_chain): - # Assemble a new match pattern using the provided key order - construct = "_var_" - for key in keys: - construct += "['{}']".format(key) - - if construct: - # check if we have a set of keys of the proper order - matches = find_matches(construct) - if not matches: - return explain_r(message, code, label=tldr) - return False - - -# dict_plot -def key_order_unchained(keys): - message = "It looks like you aren't using the correct keys, or the correct key order. Double check your data map." 
- code = "key_order_u" - tldr = "Wrong key order" - - construct = None - find_chain = "" - for a_slice in range(len(keys)): - find_chain += "_var{a2}_ = _var{a1}_[__str{a1}__]\n".format(a2=a_slice + 1, a1=a_slice) - if find_match(find_chain): - construct = "" - count = 0 - for key in keys: - construct += "_var{a2}_ = _var{a1}_['{key}']\n".format(a2=count + 1, a1=count, key=key) - count += 1 - - if construct: - matches = find_matches(construct) - if not matches: - return explain_r(message, code, label=tldr) - return False - - -# dict_decision -def filt_key(c_value, num_slices): - message = ('It looks like you\'re using "{c_value}" as a dictionary key to filter data. ' - "Dictionary keys don't filter data, they only access data that's already there. " - "You should be comparing data retrieved from the dictionary to '{c_value}'") - code = "filt_key" - tldr = "Attempting filter as Key" - - construct = "_var_" - for a_slice in range(num_slices): - construct += "[__str{}__]".format(a_slice) - matches = find_matches(construct) - for match in matches: - for num in range(a_slice + 1): - value = match["__str{}__".format(num)] - if value.is_ast("Str") and value.value == c_value: - return explain_r(message.format(c_value=value), code, label=tldr) - return False - - -# dict_acc_group -def miss_dict_acc(): - message = ("You are missing something that looks like a dictionary access. " - "In this unit, you should be using dictionary access") - code = "miss_acc" - tldr = "Missing Dictionary Access" - - if not find_matches("_var_[__str1__]"): - return explain_r(message, code, label=tldr) - return False - - -# dict_decision -def compare_key(c_value): - message = ('In this problem, "{}" is not a key, ' - 'but something you should compare against.'.format(c_value)) - code = "comp_key" - tldr = "Using filter value as key" - - matches = find_matches("__exp0__ == __exp1__") - for match in matches: - for num in range(2): - __exp__ = match["__exp{}__".format(num)] - submatches = __exp__.find_matches("[__str__]") - for submatch in submatches: - __str__ = submatch["__str__"] - if __str__.is_ast("Str") and __str__.value == c_value: - return explain_r(message, code, label=tldr) - return False - - -# dict_decision -def str_equality(): - message = ('You are comparing two different string values, "{}" and "{}". While dictionary keys are strings, ' - "they are only interpreted by the computer as keys when used with the dictionary access syntax") - code = "str_eq" - tldr = "Comparing equality of raw strings" - - matches = find_matches("__str1__ == __str2__") - for match in matches: - __str1__ = match["__str1__"] - __str2__ = match["__str2__"] - if __str1__.is_ast("Str") and __str2__.is_ast("Str"): - return explain_r(message.format(__str1__.value, __str2__.value), code, label=tldr) - return False - - -# dict_list_group and dict_decision_group -def fetch_acc_dict(values): - message = ("The code to fetch the list of dictionaries, {}.{}, cannot be used to select data. " - "Selection of data should be done with an if statement") - code = "fetch_acc" - tldr = "Malformed Dictionary List Fetch" - - matches = find_matches("_var_._func_[__str__]") - for match in matches: - _var_ = match["_var_"].id - _func_ = match["_func_"].id - __str__ = match["__str__"] - if __str__.is_ast("Str") and __str__.value in values: - return explain_r(message.format(_var_, _func_), code, label=tldr) - return False - - -# dict_plot -def show_args(): - # TODO: Add this to plotting mistakes? 
- message = ("The plt.show function only tells the computer to display the plot. " - "If you want to modify the plot, use other available plotting functions.") - code = "show_args" - tldr = "Show takes no arguments" - - matches = find_matches("plt.show(__exp__)") - if matches: - return explain_r(message, code, label=tldr) - return False - - -# dict_plot -def dict_plot(): - message = ("The list {} is a list of dictionaries. plt.plot only accepts a list" - " of numbers. You need to extract the numbers from the list of dictionaries first.") - code = "dict_plot" - tldr = "Plotting list of Dictionaries" - - matches = find_matches("plt._func_(_var_)") - for match in matches: - _var_ = match["_var_"] - var_state = _var_.get_data_state() - if var_state and str(var_state.type) == "ListType" and str(var_state.type.subtype) == "DictType": - return explain_r(message.format(_var_.id), code, label=tldr) - return False - - -# dict_acc_group -def comp_in_dict_acc(): - message = ("You are using a boolean expression in a dictionary access. Remember that the dictionary " - "access takes a key and provides a value. The comparison should be made with the value, not the key.") - code = "comp_acc" - tldr = "Comparison in key access" - - matches = find_matches("_var_[__exp__][__exp2__ == __exp3__]") - if matches: - return explain_r(message, code, label=tldr) - return False diff --git a/src/lib/cs1014/input_mistakes.py b/src/lib/cs1014/input_mistakes.py deleted file mode 100644 index 586525f8e7..0000000000 --- a/src/lib/cs1014/input_mistakes.py +++ /dev/null @@ -1,24 +0,0 @@ -from pedal.report.imperative import gently_r, explain_r -from pedal.cait.cait_api import * - - -def unnecessary_cast(needed_casts): - """ - - Args: - needed_casts: List of casts that are necessary to this problem - - Returns: - - """ - message = "Converting to {} is unnecessary in this problem" - code = "ex_cast" - tldr = "Unnecessary Conversion" - - known_casts = ["float", "int", "str"] - matches = find_matches("_cast_(___)") - for match in matches: - user_cast = match["_cast_"].id - if user_cast not in needed_casts and user_cast in known_casts: - return explain_r(message.format(user_cast), code, label=tldr) - return False diff --git a/src/lib/cs1014/tests/__init__.py b/src/lib/cs1014/tests/__init__.py deleted file mode 100644 index e69de29bb2..0000000000 diff --git a/src/lib/cs1014/tests/test_dictionary.py b/src/lib/cs1014/tests/test_dictionary.py deleted file mode 100644 index 29a183c573..0000000000 --- a/src/lib/cs1014/tests/test_dictionary.py +++ /dev/null @@ -1,1210 +0,0 @@ -import sys -import os - -sys.path.insert(0, os.path.abspath(os.path.join(os.path.dirname(__file__), '../..'))) - -from tests.mistake_test_template import * -from CS1014.dictionaries import * -from CS1014.input_mistakes import * -from pedal.mistakes.iteration_context import all_labels_present -from pedal.resolvers import simple -# import pedal.sandbox.compatibility as compatibility -# from tests.execution_helper import Execution -from pedal.toolkit.utilities import * - - -class DictionaryMistakeTest(MistakeTest): - def setUp(self): - self._dict_str = ("[{'City': 'Birmingham', 'Precipitation': 0.0, 'Temperature': 46}," - "{'City': 'Fairbanks' , 'Precipitation': 1.37, 'Temperature': 57}," - "{'City': 'Miami', 'Precipitation': 1.86, 'Temperature': 80}," - "{'City': 'Los Angeles', 'Precipitation': 0.5, 'Temperature': 73}," - "{'City': 'Denver', 'Precipitation': 0.0, 'Temperature': 49}," - "{'City': 'Chicago', 'Precipitation': 0.23, 'Temperature': 40}]") - - def 
test_hard_coding(self): - constants = [99.23, "99.23"] - self.to_source('book = {"number_of_pages":285, "price":99.23, "discount":0.1}\n' - 'print ("99.23")') - ret = hard_coding(constants) - self.assertTrue(ret, "Expected feedback message, got {} instead".format(ret)) - - self.to_source('book = {"number_of_pages":285, "price":99.23, "discount":0.1}\n' - 'price = "99.23"\n' - 'print (price)') - ret = hard_coding(constants) - self.assertTrue(ret, "Expected feedback message, got {} instead".format(ret)) - - self.to_source('book = {"number_of_pages":285, "price":99.23, "discount":0.1}\n' - 'print (book["price"])') - ret = hard_coding(constants) - self.assertFalse(ret, "Expected False, got {} instead".format(ret)) - - def test_print_dict_key(self): - # TODO: Check output string - key_list = ['price', 'number_of_pages', 'discount'] - self.to_source('book = {"number_of_pages":285, "price":99.23, "discount":0.1}\n' - 'how_much= book["price"]\n' - 'print("price")') - ret = print_dict_key(key_list) - self.assertTrue(ret, "Didn't give message, returned {} instead".format(ret)) - - self.to_source('price = "price"\n' - 'book = {"number_of_pages":285, "price":99.23, "discount":0.1}\n' - 'how_much= book[price]\n' - 'print(price)') - ret = print_dict_key(key_list) - self.assertTrue(ret, "Didn't give message, returned {} instead".format(ret)) - - self.to_source('book = {"number_of_pages":285, "price":99.23, "discount":0.1}\n' - 'how_much= book["price"]\n' - 'print(["price"])') - ret = print_dict_key(key_list) - self.assertTrue(ret, "Didn't give message, returned {} instead".format(ret)) - - self.to_source('book = {"number_of_pages":285, "price":99.23, "discount":0.1}\n' - 'print (book["price"])') - ret = print_dict_key(key_list) - self.assertFalse(ret, "Expected False, got {} instead".format(ret)) - - self.to_source('price = "price"\n' - 'book = {"number_of_pages":285, "price":99.23, "discount":0.1}\n' - 'print (book[price])') - ret = print_dict_key(key_list) - self.assertFalse(ret, "Expected False, got {} instead".format(ret)) - - def test_var_instead_of_key(self): - # TODO: Check output string - key_list = ['price', 'number_of_pages', 'discount', 'title'] - - self.to_source('book = {"number_of_pages":285, "price":99.23, "discount":0.1}\n' - 'print(price)') - ret = var_instead_of_key(key_list) - self.assertTrue(ret, "Didn't give message, returned {} instead".format(ret)) - - self.to_source('book = {"number_of_pages":285, "price":99.23, "discount":0.1}\n' - 'print(book["price"])') - ret = var_instead_of_key(key_list) - self.assertFalse(ret, "Expected False, got {} instead".format(ret)) - - self.to_source('book = {"number_of_pages":285, "price":99.23, "discount":0.1}\n' - 'price = book["price"]\n' - 'print(price)') - ret = var_instead_of_key(key_list) - self.assertFalse(ret, "Expected False, got {} instead".format(ret)) - - self.to_source("import weather\n" - "import matplotlib.pyplot as plt\n" - "weather_reports = weather.get_report()\n" - "list = []\n" - "City = input('City')\n" - "for report in weather_reports:\n" - " if City == report['Station']['City']:\n" - " list.append(report[\"Data\"][\"Precipitation\"])\n") - ret = var_instead_of_key(['City', 'Data', 'Precipitation']) - self.assertFalse(ret, "Expected False, got {} instead".format(ret)) - - def test_parens_in_dict(self): - # TODO: Check output string - key_list = ['price', 'number_of_pages', 'discount'] - - self.to_source('book = {"number_of_pages":285, "price":99.23, "discount":0.1}\n' - 'print(book("price"))') - ret = parens_in_dict(key_list) - 
self.assertTrue(ret, "Didn't give message, returned {} instead".format(ret)) - - self.to_source('price = "price"\n' - 'book = {"number_of_pages":285, "price":99.23, "discount":0.1}\n' - 'print(book(price))') - ret = parens_in_dict(key_list) - self.assertTrue(ret, "Didn't give message, returned {} instead".format(ret)) - self.assertTrue("price" in ret, "Message '{}' didn't include correct key".format(ret)) - - self.to_source('book = {"number_of_pages":285, "price":99.23, "discount":0.1}\n' - 'print(book["price"])') - ret = parens_in_dict(key_list) - self.assertFalse(ret, "Expected False, got {} instead".format(ret)) - - self.to_source('price = "price"' - 'book = {"number_of_pages":285, "price":99.23, "discount":0.1}\n' - 'print(book[price])') - ret = parens_in_dict(key_list) - self.assertFalse(ret, "Expected False, got {} instead".format(ret)) - - self.to_source('price = input("price")\n' - 'book = {"number_of_pages":285, "price":99.23, "discount":0.1}\n' - 'print(book[price])') - ret = parens_in_dict(key_list) - self.assertFalse(ret, "Expected False, got {} instead".format(ret)) - - self.to_source('for item in _list:\n' - ' print(item("price"))') - ret = parens_in_dict(key_list) - self.assertTrue(ret, "Didn't give message, returned {} instead".format(ret)) - - def test_list_as_dict(self): - # TODO: Check output string - self.to_source("total = 0\n" - "weather_reports = {}\n" - "for precipitation in weather_reports:\n" - " total = total + weather_reports['Precipitation']\n" - "print (total)".format(self._dict_str)) - ret = list_as_dict() - self.assertTrue(ret, "Didn't give message, returned {} instead".format(ret)) - - self.to_source("total = 0\n" - "weather_reports = {}\n" - "for precipitation in weather_reports:\n" - " total = total + precipitation['Precipitation']\n" - "print (total)\n".format(self._dict_str)) - ret = list_as_dict() - self.assertFalse(ret, "Expected False, got {} instead".format(ret)) - - self.to_source("earthquake_report = [{\"Location\" : \"California\", \"Magnitude\" : 2.3, \"Depth\" : 7.66},\n" - " {\"Location\" : \"Japan\", \"Magnitude\" : 5.3, \"Depth\" : 3.34},\n" - " {\"Location\" : \"Burma\", \"Magnitude\" : 4.9, \"Depth\" :97.07},\n" - " {\"Location\" : \"Alaska\", \"Magnitude\" : 4.6, \"Depth\" : 35.0},\n" - " {\"Location\" : \"Washington\", \"Magnitude\" : 2.19, \"Depth\" : 15.28},\n" - " {\"Location\" : \"China\", \"Magnitude\" : 4.3, \"Depth\" : 10.0}\n" - " ]\n" - "total = 0\n" - "number = 0\n" - "for earthquake_report in earthquake_reports:\n" - " total = total + earthquake_report['Magnitude']\n" - " number = 1 + number\n" - "average = total / number\n" - "print(average)" - ) - ret = list_as_dict() - self.assertTrue(ret, "Didn't give message, returned {} instead".format(ret)) - - def test_dict_out_of_loop(self): - # TODO: Check output string - keys = ['Precipitation'] - self.to_source('rain = weather_reports["Precipitation"]\n' - 'total = 0\n' - 'for report in weather_reports:\n' - ' total = total + rain\n' - 'print(total)\n') - ret = dict_out_of_loop(keys) - self.assertTrue(ret, "Didn't give message, returned {} instead".format(ret)) - - self.to_source('weather_reports = {}\n' - 'total = 0\n' - 'for report in weather_reports:\n' - ' total = total + report["Precipitation"]\n' - 'print(total)\n'.format(self._dict_str)) - ret = dict_out_of_loop(keys) - self.assertFalse(ret, "Expected False, got {} instead".format(ret)) - - self.to_source('import matplotlib.pyplot as plt\n' - 'import weather\n' - 'weather_reports = weather.get_weather()\n' - 'BB_min = []\n' 
- 'BB_max = []\n' - 'for weather in weather_reports: \n' - ' if ("Blacksburg" in weather["Station"]["City"]): \n' - ' BB_min.append(weather["Data"]["Temperature"]["Min Temp"])\n' - ' \n' - 'for weather in weather_reports: \n' - ' if ("Blacksburg" in weather["Station"]["City"]):\n' - ' BB_max.append(weather["Data"]["Temperature"]["Max Temp"])\n' - 'plt.scatter(BB_min,BB_max)\n' - 'plt.xlabel("Trend")\n' - 'plt.ylabel("Temperatures")\n' - 'plt.title("Relationship between Minimum and Maximum Temperatures in Blacksburg")\n' - 'plt.show()\n') - all_labels_1 = all_labels_present() - ret = dict_out_of_loop(keys) - self.assertFalse(all_labels_1, "false negative") - all_labels_2 = all_labels_present() - self.assertFalse(ret, "...") - self.assertTrue(all_labels_1 == all_labels_2, "Side effects aren't undoing themselves") - - def test_wrong_keys(self): - # TODO: Check output string - keys = ['Date', "Temperature", "Wind", "Min Temp", "Max Temp", "Avg Temp", "Direction", "Speed", "Month", - "Year", - "Week of", "Full", "State", "Code", "City", "Location"] - self.to_source("total = 0\n" - "for reports in weather_reports:\n" - " total = total + reports['Temperature']\n" - "print(total)\n") - ret = wrong_keys(keys) - self.assertTrue(ret, "Didn't give message, returned {} instead".format(ret)) - - self.to_source("temperature = 'Temperature'\n" - "total = 0\n" - "for reports in weather_reports:\n" - " total = total + reports[temperature]\n" - "print(total)\n") - ret = wrong_keys(keys) - self.assertTrue(ret, "Didn't give message, returned {} instead".format(ret)) - self.assertTrue("Temperature" in ret, "Message '{}' didn't include correct key".format(ret)) - - self.to_source("total = 0\n" - "for reports in weather_reports:\n" - " total = total + reports['Precipitation']\n" - "print(total)\n") - ret = wrong_keys(keys) - self.assertFalse(ret, "Expected False, got {} instead".format(ret)) - - self.to_source("precip = 'Precipitation'\n" - "total = 0\n" - "for reports in weather_reports:\n" - " total = total + reports[precip]\n" - "print(total)\n") - ret = wrong_keys(keys) - self.assertFalse(ret, "Expected False, got {} instead".format(ret)) - - def test_dict_access_not_in_loop(self): - self.to_source('weatherPrecipitation = weather_reports["Precipitation"]\n' - 'for report in weather_reports:\n' - ' total_precipitation = weatherPrecipitation + total_precipitation\n' - 'print(total_precipitation)\n') - ret = dict_access_not_in_loop() - self.assertTrue(ret, "Didn't give message, returned {} instead".format(ret)) - - self.to_source('for weather_report in weather_reports:\n' - ' total = total + precipitations[Precipitation]\n' - 'print(total)\n') - ret = dict_access_not_in_loop() - self.assertFalse(ret, "Expected False, got {} instead".format(ret)) - - self.to_source('for weather_report in weather_reports:\n' - ' total = total + precipitations["Precipitation"]\n' - 'print(total)\n') - ret = dict_access_not_in_loop() - self.assertFalse(ret, "Expected False, got {} instead".format(ret)) - - self.to_source('for weather in weather_reports:\n' - ' if ("San Diego" in weather["Station"]["City"]):\n' - ' sandiego_list.append(weather["Data"]["Temperature"]["Avg Temp"])\n' - 'for weather in weather_reports:\n' - ' if ("Blacksburg" in weather["Station"]["City"]):\n' - ' blacksburg_list.append(weather["Data"]["Temperature"]["Avg Temp"])\n' - 'for temp in sandiego_list:\n' - ' sandiego_temp = sandiego_temp + 1\n' - ' sandiego_number = sandiego_number + temp\n' - 'sandiego_average = sandiego_number / sandiego_temp\n' - 'for temp 
in blacksburg_list:\n' - ' blacksburg_temp = blacksburg_temp + 1\n' - ' blacksburg_number = blacksburg_number + temp\n' - 'blacksburg_average = blacksburg_number / blacksburg_temp\n' - 'plt.scatter(BB_min,BB_max)\n' - 'plt.xlabel("Trend")\n' - 'plt.ylabel("Temperatures")\n' - 'plt.title("Relationship between Minimum and Maximum Temperatures in Blacksburg")\n' - 'plt.show()\n') - ret = dict_access_not_in_loop() - self.assertFalse(ret, "Expected False, got {} instead".format(ret)) - ret2 = all_labels_present() - self.assertFalse(ret2, "Expected False, got message instead") - - def test_hard_coded_list(self): - val_list = [0.0, 1.37, 1.86, 0.5, 0.0, 0.23] - self.to_source('total_rain = 0\n' - 'weather_reports = [0.0,1.37,1.86,0.5,0.0,0.23]\n' - 'for rain in weather_reports:\n' - ' total_rain = rain + total_rain\n' - 'print(total_rain)\n') - ret = hard_coded_list(val_list) - self.assertTrue(ret, "Didn't give message, returned {} instead".format(ret)) - - self.to_source('total_rain = 0\n' - 'weather_reports = {}\n' - 'for rain in weather_reports:\n' - ' total_rain = rain + total_rain\n' - 'print(total_rain)\n'.format(self._dict_str)) - ret = hard_coded_list(val_list) - self.assertFalse(ret, "Expected False, got {} instead".format(ret)) - - def test_iter_as_key(self): - # TODO: Check output string - keys = ['Data', 'Date', "Station", "Temperature", "Precipitation", "Wind", "Min Temp", "Max Temp", "Avg Temp", - "Direction", "Speed", "Month", "Year", "Week of", "Full", "State", "Code", "City", "Location"] - self.to_source('total_precipitation = 0\n' - 'for Precipitation in weather_reports:\n' - ' total_precipitation = total_precipitation + "Precipitation"\n' - 'print(total_precipitation)\n') - ret = iter_as_key(keys) - self.assertTrue(ret, "Didn't give message, returned {} instead".format(ret)) - - self.to_source('total_precipitation = 0\n' - 'for Precipitation in weather_reports:\n' - ' total_precipitation = total_precipitation + Precipitation["Precipitation"]\n' - 'print(total_precipitation)\n') - ret = iter_as_key(keys) - self.assertFalse(ret, "Expected False, got {} instead".format(ret)) - - self.to_source('total_precipitation = 0\n' - 'for precip in weather_reports:\n' - ' total_precipitation = total_precipitation + precip\n' - 'print(total_precipitation)\n') - ret = iter_as_key(keys) - self.assertFalse(ret, "Expected False, got {} instead".format(ret)) - - def test_dict_acc_as_lis_var(self): - keys = ['Data', 'Date', "Station", "Temperature", "Precipitation", "Wind", "Min Temp", "Max Temp", "Avg Temp", - "Direction", "Speed", "Month", "Year", "Week of", "Full", "State", "Code", "City", "Location"] - self.to_source('precipitation_total=0\n' - 'precipitation_list=[]\n' - 'for precipitation in ["Precipitation"]:\n' - ' precipitation_list.append("Precipitation")\n' - 'for precipitation in precipitation_list:\n' - ' precipitation_total=precipitation_total + precipitation\n' - 'print(precipitation_total)\n') - ret = list_str_as_list_var(keys) - self.assertTrue(ret, "Didn't give message, returned {} instead".format(ret)) - - self.to_source('total_precipitation = 0\n' - 'for precip in weather_reports:\n' - ' total_precipitation = total_precipitation + precip\n' - 'print(total_precipitation)\n') - ret = list_str_as_list_var(keys) - self.assertFalse(ret, "Expected False, got {} instead".format(ret)) - - def test_append_and_sum(self): - self.to_source('precipitation_total=0\n' - 'precipitation_list=[]\n' - 'for precipitation in weather_reports["Precipitation"]:\n' - ' 
precipitation_list.append("Precipitation")\n' - 'for precipitation in precipitation_list:\n' - ' precipitation_total= precipitation_total + 1\n' - 'print(precipitation_total)\n') - ret = append_and_sum() - self.assertTrue(ret, "Didn't give message, returned {} instead".format(ret)) - - self.to_source('total_precipitation = 0\n' - 'for precip in weather_reports:\n' - ' total_precipitation = total_precipitation + precip\n' - 'print(total_precipitation)\n') - ret = append_and_sum() - self.assertFalse(ret, "Expected False, got {} instead".format(ret)) - - def test_iter_prop_dict_acc(self): - self.to_source('for weather_reports["Precipitation"] in weather_reports:\n' - ' total = weather_reports[Precipitation] + total\n' - 'print(total)\n') - ret = iter_prop_dict_acc() - self.assertTrue(ret, "Didn't give message, returned {} instead".format(ret)) - - self.to_source('total_precipitation = 0\n' - 'for precip in weather_reports:\n' - ' total_precipitation = total_precipitation + precip\n' - 'print(total_precipitation)\n') - ret = iter_prop_dict_acc() - self.assertFalse(ret, "Expected False, got {} instead".format(ret)) - - def test_list_str_dict(self): - # TODO: Check output string - keys = ['Data', 'Date', "Station", "Temperature", "Precipitation", "Wind", "Min Temp", "Max Temp", "Avg Temp", - "Direction", "Speed", "Month", "Year", "Week of", "Full", "State", "Code", "City", "Location"] - self.to_source('total=0\n' - 'number=0\n' - 'for precipitation1 in "Precipitation":\n' - ' total= total+ precipitation1["Precipitation"]\n' - ' number= number + 1\n' - 'average= total/ total\n' - 'print(average)\n') - ret = list_str_dict(keys) - self.assertTrue(ret, "Didn't give message, returned {} instead".format(ret)) - - self.to_source('total_precipitation = 0\n' - 'for precip in weather_reports:\n' - ' total_precipitation = total_precipitation + precip\n' - 'print(total_precipitation)\n') - ret = list_str_dict(keys) - self.assertFalse(ret, "Expected False, got {} instead".format(ret)) - - def test_missing_key(self): - # TODO: Check output string - keys = ["Precipitation", "Data"] - self.to_source('total=0\n' - 'number=0\n' - 'for precipitation1 in "Precipitation":\n' - ' total= total+ precipitation1["Precipitation"]\n' - ' number= number + 1\n' - 'average= total/ total\n' - 'print(average)\n') - ret = missing_key(keys) - self.assertTrue(ret, "Didn't give message, returned {} instead".format(ret)) - - self.to_source('total_precipitation = 0\n' - 'for precip in weather_reports:\n' - ' total_precipitation = total_precipitation + precip["Data"]["Precipitation"]\n' - 'print(total_precipitation)\n') - ret = missing_key(keys) - self.assertFalse(ret, "Expected False, got {} instead".format(ret)) - - def test_blank_key(self): - keys = ["distance", "time"] - self.to_source('distance_in_kilometers = trip_data["____"]/1000\n' - 'trip_data = {"distance":123000.0, "time":14000.0}\n' - 'print(average_speed_in_mph) \n' - 'average_speed_in_mph = ____ / time_in_hours\n' - 'time_in_hours = trip_data["____"]/____\n' - '____ = distance_in_kilometers / 1.6\n') - ret = blank_key(keys) - self.assertTrue(ret, "Didn't give message, returned {} instead".format(ret)) - - self.to_source('trip_data = {"distance":123000.0, "time":14000.0}\n' - 'distance_in_kilometers = trip_data["distance"]/1000\n' - 'distance_in_miles = distance_in_kilometers / 1.6\n' - 'time_in_hours = trip_data["time"]/3600\n' - 'average_speed_in_mph = distance_in_miles / time_in_hours\n' - 'print(average_speed_in_mph) \n') - ret = blank_key(keys) - 
self.assertFalse(ret, "Expected False, got {} instead".format(ret)) - - def test_dict_parens_brack(self): - # TODO: Check output string - self.to_source('import weather\n' - 'weather_reports = weather.get_weather()\n' - 'sum = 0\n' - 'for rain in weather_reports:\n' - ' sum = sum + weather_reports(["Data"]["Precipitation"])\n' - 'print(sum)\n') - ret = dict_parens_brack() - self.assertTrue(ret, "Didn't give message, returned {} instead".format(ret)) - - self.to_source('book = {"number_of_pages":285, "price":99.23, "discount":0.1}\n' - 'print(["price"])') - ret = dict_parens_brack() - self.assertFalse(ret, "Expected False, got {} instead".format(ret)) - - self.to_source('total_precipitation = 0\n' - 'for precip in weather_reports:\n' - ' total_precipitation = total_precipitation + precip["Data"]["Precipitation"]\n' - 'print(total_precipitation)\n') - ret = dict_parens_brack() - self.assertFalse(ret, "Expected False, got {} instead".format(ret)) - - def test_comma_dict_acc(self): - # TODO: Check output string - self.to_source("import weather\n" - "weather_reports = weather.get_weather()\n" - "total = 0\n" - "for report in weather_reports:\n" - " total = total + report['Data'],['Precipitation']\n" - "print(total)\n") - ret = comma_dict_acc() - self.assertTrue(ret, "Didn't give message, returned {} instead".format(ret)) - - self.to_source('total_precipitation = 0\n' - 'for precip in weather_reports:\n' - ' total_precipitation = total_precipitation + precip["Data"]["Precipitation"]\n' - 'print(total_precipitation)\n') - ret = comma_dict_acc() - self.assertFalse(ret, "Expected False, got {} instead".format(ret)) - - def test_no_dict_in_loop(self): - # TODO: Check output values - self.to_source("import weather\n" - "weather_reports = weather.get_weather()\n" - "total = 0\n" - "for precip in weather_reports:\n" - " total = total + precip\n" - "print(total)\n") - ret = no_dict_in_loop() - self.assertTrue(ret, "Didn't give message, returned {} instead".format(ret)) - - self.to_source('total_precipitation = 0\n' - 'for precip in weather_reports:\n' - ' total_precipitation = total_precipitation + precip2["Data"]["Precipitation"]\n' - 'print(total_precipitation)\n') - ret = no_dict_in_loop() - self.assertTrue(ret, "Didn't give message, returned {} instead".format(ret)) - - self.to_source('total_precipitation = 0\n' - 'for precip in weather_reports:\n' - ' total_precipitation = total_precipitation + precip["Data"]["Precipitation"]\n' - 'print(total_precipitation)\n') - ret = no_dict_in_loop() - self.assertFalse(ret, "Expected False, got {} instead".format(ret)) - - self.to_source('total_precipitation = 0\n' - 'key = "Precipitation"\n' - 'for city in weather_reports:\n' - ' total_precipitation = total_precipitation + city[key]\n' - 'print(total_precipitation)\n') - ret = no_dict_in_loop() - self.assertFalse(ret, "Expected False, got {} instead".format(ret)) - - def func_filter(self): - keys = ['Data', 'Date', "Station", "Temperature", "Precipitation", "Wind", "Min Temp", "Max Temp", "Avg Temp", - "Direction", "Speed", "Month", "Year", "Week of", "Full", "State", "Code", "City", "Location"] - self.to_source('import weather\n' - 'weather_reports = weather.get_weather()\n' - 'total_precipitation = 0\n' - 'for report in weather_reports:\n' - ' total_pecipitation = total_precipitation + weather.get_weather("Data")\n' - 'print(total_precipitation)\n') - ret = func_filter(keys) - self.assertTrue(ret, "Didn't give message, returned {} instead".format(ret)) - - self.to_source('import weather\n' - 
'precipitation_total=0\n' - 'weather_reports = weather.get_weather("Precipitation")\n' - 'for report in weather_reports:\n' - ' precipitation_total = precipitation_total + 1\n') - ret = func_filter(keys) - self.assertTrue(ret, "Didn't give message, returned {} instead".format(ret)) - - self.to_source('total_precipitation = 0\n' - 'for precip in weather_reports:\n' - ' total_precipitation = total_precipitation + precip["Data"]["Precipitation"]\n' - 'print(total_precipitation)\n') - ret = func_filter(keys) - self.assertFalse(ret, "Expected False, got {} instead".format(ret)) - - self.to_source("report_list = classics.get_books(test=True)") - ret = func_filter(keys) - self.assertFalse(ret, "Expected False, got {} instead".format(ret)) - - def test_str_list(self): - # TODO: check output values - keys = ['Data', 'Date', "Station", "Temperature", "Precipitation", "Wind", "Min Temp", "Max Temp", "Avg Temp", - "Direction", "Speed", "Month", "Year", "Week of", "Full", "State", "Code", "City", "Location"] - self.to_source('import weather\n' - 'weather_reports = weather.get_weather()\n' - 'totalPrecip = 0\n' - 'for weather in weather_reports:\n' - ' totalPrecip = totalPrecip + ["Precipitation"]\n' - 'print(totalPrecip)\n') - ret = str_list(keys) - self.assertTrue(ret, "Didn't give message, returned {} instead".format(ret)) - - self.to_source('total_precipitation = 0\n' - 'for precip in weather_reports:\n' - ' total_precipitation = total_precipitation + precip["Data"]["Precipitation"]\n' - 'print(total_precipitation)\n') - ret = str_list(keys) - self.assertFalse(ret, "Expected False, got {} instead".format(ret)) - - def test_list_var_dict_acc(self): - # TODO: Check output values - self.to_source('import weather\n' - 'weather_reports = weather.get_weather()\n' - 'pt = 0\n' - 'for precipitation in weather_reports["Precipitation"]:\n' - ' pt = pt + precipiation\n' - 'print(pt)\n') - ret = list_var_dict_acc() - self.assertTrue(ret, "Didn't give message, returned {} instead".format(ret)) - - self.to_source('total_precipitation = 0\n' - 'for precip in weather_reports:\n' - ' total_precipitation = total_precipitation + precip["Data"]["Precipitation"]\n' - 'print(total_precipitation)\n') - ret = list_var_dict_acc() - self.assertFalse(ret, "Expected False, got {} instead".format(ret)) - - def test_key_comp(self): - # TODO: Check output values - keys = ['Data', 'Date', "Station", "Temperature", "Precipitation", "Wind", "Min Temp", "Max Temp", "Avg Temp", - "Direction", "Speed", "Month", "Year", "Week of", "Full", "State", "Code", "City", "Location"] - self.to_source('import weather\n' - 'weather_reports = weather.get_weather()\n' - 'sum = 0\n' - 'for weather_instance in weather_reports:\n' - ' if weather_instance["Data"] == "Precipitation":\n' - ' sum = sum + weather_instance["Data"]\n' - 'print(sum)\n') - ret = key_comp(keys) - self.assertTrue(ret, "Didn't give message, returned {} instead".format(ret)) - - self.to_source('import weather\n' - 'weather_reports = weather.get_weather()\n' - 'sum = 0\n' - 'data = "Data"\n' - 'precip = "Precipitation"\n' - 'for weather_instance in weather_reports:\n' - ' if weather_instance[data] == precip:\n' - ' sum = sum + weather_instance[data]\n' - 'print(sum)\n') - ret = key_comp(keys) - self.assertTrue(ret, "Didn't give message, returned {} instead".format(ret)) - - self.to_source('import weather\n' - 'weather_reports = weather.get_weather()\n' - 'sum = 0\n' - 'for weather_instance in weather_reports:\n' - ' if weather_instance["Station"]["City"] == "Chicago":\n' - ' sum = 
sum + weather_instance["Data"]["Precipitation"]\n' - 'print(sum)\n') - ret = key_comp(keys) - self.assertFalse(ret, "Expected False, got {} instead".format(ret)) - - self.to_source('import weather\n' - 'weather_reports = weather.get_weather()\n' - 'sum = 0\n' - 'loc1 = "Station"\n' - 'loc2 = "City"\n' - 'data = "Data"\n' - 'precip = "Precipitation"\n' - 'for weather_instance in weather_reports:\n' - ' if weather_instance[loc1][loc2] == "Chicago":\n' - ' sum = sum + weather_instance[data][precip]\n' - 'print(sum)\n') - ret = key_comp(keys) - self.assertFalse(ret, "Expected False, got {} instead".format(ret)) - - self.to_source('import weather\n' - 'weather_reports = weather.get_weather()\n' - 'sum = 0\n' - 'data = "Data"\n' - 'precip = "Precipitation"\n' - 'for weather_instance in weather_reports:\n' - ' loc1 = weather_instance["Station"]["City"]\n' - ' if loc1 == "Chicago":\n' - ' sum = sum + weather_instance[data][precip]\n' - 'print(sum)\n') - ret = key_comp(keys) - self.assertFalse(ret, "Expected False, got {} instead".format(ret)) - - self.to_source('import weather\n' - 'weather_reports = weather.get_weather()\n' - 'sum = 0\n' - 'precip = "Precipitation"\n' - 'for weather_instance in weather_reports:\n' - ' data = weather_instance["Data"]\n' - ' if data == precip:\n' - ' sum = sum + weather_instance[data]\n' - 'print(sum)\n') - ret = key_comp(keys) - self.assertTrue(ret, "Didn't give message, returned {} instead".format(ret)) - - self.to_source('for reports in weather_reports:\n' - ' if report["Station"]["City"] == "Chicago":\n' - ' trend.append(reports["Data"]["Precipitation"])') - ret = key_comp(keys) - self.assertFalse(ret, "Expected False, got {} instead".format(ret)) - - self.to_source("weather_reports = weather.get_weather()\n" - "for report in weather_reports:\n" - " City = report['Station']['City']\n" - " if City == 'Blacksburg':\n" - " pass\n") - ret = key_comp(keys) - self.assertFalse(ret, "Expected False, got {} instead".format(ret)) - - self.to_source("weather_reports = weather.get_weather()\n" - "for report in weather_reports:\n" - " City = report['Station']\n" - " if City == 'City':\n" - " pass\n") - ret = key_comp(keys) - self.assertTrue(ret, "Didn't give message, returned {} instead".format(ret)) - - def test_col_dict(self): - self.to_source('import weather\n' - 'weather_reports = weather.get_weather()\n' - 'precipitation = 0\n' - 'for weather in weather_reports:\n' - ' preciptation = precipitaion + weather["Data":"Precipitation"]\n' - 'print(precipitation)\n') - ret = col_dict() - self.assertTrue(ret, "Didn't give message, returned {} instead".format(ret)) - - self.to_source('import weather\n' - 'weather_reports = weather.get_weather()\n' - 'sum = 0\n' - 'for weather_instance in weather_reports:\n' - ' if weather_instance["Station"]["City"] == "Chicago":\n' - ' sum = sum + weather_instance["Data"]["Precipitation"]\n' - 'print(sum)\n') - ret = col_dict() - self.assertFalse(ret, "Expected False, got {} instead".format(ret)) - - def test_var_key(self): - # TODO: Check output value - keys = ['Data', 'Date', "Station", "Temperature", "Precipitation", "Wind", "Min Temp", "Max Temp", "Avg Temp", - "Direction", "Speed", "Month", "Year", "Week of", "Full", "State", "Code", "City", "Location"] - self.to_source("import weather\n" - "weather_reports = weather.get_weather()\n" - "sum = 0\n" - "for rain in weather_reports:\n" - " if rain[Station][City] == Chicago:\n" - " sum = sum + rain[Data][Precipitation]\n" - "print(sum)\n") - ret = var_key(keys) - self.assertTrue(ret, "Didn't 
give message, returned {} instead".format(ret)) - - self.to_source('import weather\n' - 'weather_reports = weather.get_weather()\n' - 'sum = 0\n' - 'for weather_instance in weather_reports:\n' - ' if weather_instance["Station"]["City"] == "Chicago":\n' - ' sum = sum + weather_instance["Data"]["Precipitation"]\n' - 'print(sum)\n') - ret = var_key(keys) - self.assertFalse(ret, "Expected False, got {} instead".format(ret)) - - self.to_source('import weather\n' - 'Station = "Station"\n' - 'City = "City"\n' - 'weather_reports = weather.get_weather()\n' - 'sum = 0\n' - 'for weather_instance in weather_reports:\n' - ' if weather_instance[Station][City] == "Chicago":\n' - ' sum = sum + weather_instance["Data"]["Precipitation"]\n' - 'print(sum)\n') - ret = var_key(keys) - self.assertFalse(ret, "Expected False, got {} instead".format(ret)) - - def test_key_order(self): - keys1 = ["Station", "City"] - keys2 = ["Data", "Precipitation"] - self.to_source('import weather\n' - 'weather_reports = weather.get_weather()\n' - 'sum = 0\n' - 'for weather in weather_reports:\n' - ' if weather["Station"]["City"] == "Chicago":\n' - ' sum = sum + weather_instance["Chicago"]["Precipitation"]\n' - 'print(sum)\n') - ret = key_order(keys1) - self.assertFalse(ret, "Expected False, got {} instead".format(ret)) - ret = key_order(keys2) - self.assertTrue(ret, "Didn't give message, returned {} instead".format(ret)) - - self.to_source('import weather\n' - 'weather_reports = weather.get_weather()\n' - 'sum = 0\n' - 'for weather_instance in weather_reports:\n' - ' if weather_instance["Station"]["City"] == "Chicago":\n' - ' sum = sum + weather_instance["Data"]["Precipitation"]\n' - 'print(sum)\n') - ret = key_order(keys1) - self.assertFalse(ret, "Expected False, got {} instead".format(ret)) - ret = key_order(keys2) - self.assertFalse(ret, "Expected False, got {} instead".format(ret)) - - self.to_source('import weather\n' - 'weather_reports = weather.get_weather()\n' - 'sum = 0\n' - 'for weather_instance in weather_reports:\n' - ' station = weather_instance["Station"]\n' - ' city = station["City"]\n' - ' if city == "Chicago":\n' - ' data = weather_instance["Data"]\n' - ' precipitation = data["Precipitation"]\n' - ' sum = sum + precipitation\n' - 'print(sum)\n') - ret = key_order(keys1) - self.assertFalse(ret, "Expected False, got {} instead".format(ret)) - ret = key_order(keys2) - self.assertFalse(ret, "Expected False, got {} instead".format(ret)) - - def test_key_order_unchained(self): - keys1 = ["Station", "City"] - keys2 = ["Data", "Precipitation"] - self.to_source('import weather\n' - 'weather_reports = weather.get_weather()\n' - 'sum = 0\n' - 'for weather_instance in weather_reports:\n' - ' station = weather_instance["City"]\n' - ' city = station["Station"]\n' - ' if city == "Chicago":\n' - ' data = weather_instance["Precipitation"]\n' - ' precipitation = data["Data"]\n' - ' sum = sum + precipitation\n' - 'print(sum)\n') - ret = key_order_unchained(keys1) - self.assertTrue(ret, "Didn't give message, returned {} instead".format(ret)) - ret = key_order_unchained(keys2) - self.assertTrue(ret, "Didn't give message, returned {} instead".format(ret)) - - self.to_source('import weather\n' - 'weather_reports = weather.get_weather()\n' - 'sum = 0\n' - 'for weather_instance in weather_reports:\n' - ' station = weather_instance["Station"]\n' - ' city = station["City"]\n' - ' if city == "Chicago":\n' - ' data = weather_instance["Data"]\n' - ' precipitation = data["Precipitation"]\n' - ' sum = sum + precipitation\n' - 'print(sum)\n') - 
ret = key_order_unchained(keys1) - self.assertFalse(ret, "Expected False, got {} instead".format(ret)) - ret = key_order_unchained(keys2) - self.assertFalse(ret, "Expected False, got {} instead".format(ret)) - - self.to_source('import weather\n' - 'weather_reports = weather.get_weather()\n' - 'sum = 0\n' - 'for weather_instance in weather_reports:\n' - ' if weather_instance["Station"]["City"] == "Chicago":\n' - ' sum = sum + weather_instance["Data"]["Precipitation"]\n' - 'print(sum)\n') - ret = key_order_unchained(keys1) - self.assertFalse(ret, "Expected False, got {} instead".format(ret)) - ret = key_order_unchained(keys2) - self.assertFalse(ret, "Expected False, got {} instead".format(ret)) - - def test_filt_key(self): - # TODO: Check output values - c_value = "Chicago" - num_slices = 3 - self.to_source('import weather\n' - 'weather_reports = weather.get_weather()\n' - 'total_precipitation_Chicago = 0\n' - 'total_precipitation_Chicago\n' - 'for report in weather_reports:\n' - ' total_precipitation = total_precipitation + report["Data"]["Precipitation"]["Chicago"]\n' - 'print (total_precipitation)\n') - ret = filt_key(c_value, num_slices) - self.assertTrue(ret, "Didn't give message, returned {} instead".format(ret)) - - self.to_source('import weather\n' - 'weather_reports = weather.get_weather()\n' - 'total_precipitation_Chicago = 0\n' - 'for report in weather_reports:\n' - ' precip = report["Data"]["Precipitation"]\n' - ' chicago_precip = precip["Chicago"]\n' - ' total_precipitation = total_precipitation + chicago_recip\n' - 'print (total_precipitation)\n') - ret = filt_key(c_value, num_slices) - self.assertTrue(ret, "Didn't give message, returned {} instead".format(ret)) - - self.to_source('import weather\n' - 'weather_reports = weather.get_weather()\n' - 'sum = 0\n' - 'for weather_instance in weather_reports:\n' - ' if weather_instance["Station"]["City"] == "Chicago":\n' - ' sum = sum + weather_instance["Data"]["Precipitation"]\n' - 'print(sum)\n') - ret = filt_key(c_value, num_slices) - self.assertFalse(ret, "Expected False, got {} instead".format(ret)) - - def test_miss_dict_acc(self): - self.to_source('import weather\n' - 'weather_reports = weather.get_weather()\n' - 'sum = 0\n' - 'for weather_instance in weather_reports:\n' - ' if City == "Chicago":\n' - ' sum = sum + "Precipitation"\n' - 'print(sum)\n') - ret = miss_dict_acc() - self.assertTrue(ret, "Didn't give message, returned {} instead".format(ret)) - - self.to_source('import weather\n' - 'weather_reports = weather.get_weather()\n' - 'total_precipitation_Chicago = 0\n' - 'total_precipitation_Chicago\n' - 'for report in weather_reports:\n' - ' precip = report["Data"]["Precipitation"]\n' - ' chicago_precip = precip["Chicago"]\n' - ' total_precipitation = total_precipitation + chicago_recip\n' - 'print (total_precipitation)\n') - ret = miss_dict_acc() - self.assertFalse(ret, "Expected False, got {} instead".format(ret)) - - def test_compare_key(self): - c_value = "Chicago" - self.to_source('import weather\n' - 'weather_reports = weather.get_weather()\n' - 'sum = 0\n' - 'for weather_instance in weather_reports:\n' - ' if weather_instance["Station"]["City"] == ["Chicago"]:\n' - ' sum = sum + weather_instance["Data"]["Precipitation"]\n' - 'print(sum)\n') - ret = compare_key(c_value) - self.assertTrue(ret, "Didn't give message, returned {} instead".format(ret)) - - self.to_source('import weather\n' - 'weather_reports = weather.get_weather()\n' - 'total_precipitation_Chicago = 0\n' - 'total_precipitation_Chicago\n' - 'for report in 
weather_reports:\n' - ' precip = report["Data"]["Precipitation"]\n' - ' chicago_precip = precip["Chicago"]\n' - ' total_precipitation = total_precipitation + chicago_recip\n' - 'print (total_precipitation)\n') - ret = compare_key(c_value) - self.assertFalse(ret, "Expected False, got {} instead".format(ret)) - - def test_str_equality(self): - self.to_source('import weather\n' - 'weather_reports = weather.get_weather()\n' - 'sum = 0\n' - 'for weather in weather_reports:\n' - ' if("City" == "Chichago"):\n' - ' sum = sum + weather["Data"]["Precipitation"]\n' - 'print(sum)\n') - ret = str_equality() - self.assertTrue(ret, "Didn't give message, returned {} instead".format(ret)) - - self.to_source('import weather\n' - 'weather_reports = weather.get_weather()\n' - 'total_precipitation = 0\n' - 'for report in weather_reports:\n' - ' if report["Station"]["City" == "Chicago"]:\n' - ' total_precipitation = total_precipitation + report["Data"]["Precipitation"]\n' - 'print(total_precipitation)\n') - ret = str_equality() - self.assertTrue(ret, "Didn't give message, returned {} instead".format(ret)) - - self.to_source('import weather\n' - 'weather_reports = weather.get_weather()\n' - 'total_precipitation_Chicago = 0\n' - 'total_precipitation_Chicago\n' - 'for report in weather_reports:\n' - ' precip = report["Data"]["Precipitation"]\n' - ' chicago_precip = precip["Chicago"]\n' - ' total_precipitation = total_precipitation + chicago_recip\n' - 'print (total_precipitation)\n') - ret = str_equality() - self.assertFalse(ret, "Expected False, got {} instead".format(ret)) - - def test_unnecessary_cast(self): - cast_list = ["float"] - self.to_source('import weather\n' - 'weather_reports = weather.get_weather()\n' - 'total_precipitation_Chicago = 0\n' - 'total_precipitation_Chicago\n' - 'for report in weather_reports:\n' - ' precip = report["Data"]["Precipitation"]\n' - ' chicago_precip = int(precip["Chicago"])\n' - ' total_precipitation = total_precipitation + chicago_recip\n' - 'print (total_precipitation)\n') - ret = unnecessary_cast(cast_list) - self.assertTrue(ret, "Didn't give message, returned {} instead".format(ret)) - - self.to_source('import weather\n' - 'weather_reports = weather.get_weather()\n' - 'total_precipitation_Chicago = 0\n' - 'total_precipitation_Chicago\n' - 'for report in weather_reports:\n' - ' precip = report["Data"]["Precipitation"]\n' - ' chicago_precip = precip["Chicago"]\n' - ' total_precipitation = total_precipitation + chicago_recip\n' - 'print (total_precipitation)\n') - ret = unnecessary_cast(cast_list) - self.assertFalse(ret, "Expected False, got {} instead".format(ret)) - - def test_fetch_acc_dict(self): - keys = ["Data", "Precipitation", "Station", "Chicago"] - self.to_source('import weather\n' - 'precipitation = 0\n' - 'weather_reports = weather.get_weather("Chicago")\n' - 'where = weather.get_weather["Chicago"]\n' - 'for weather in weather_reports:\n' - ' precipitation = precipitation + weather["Data"]["Precipitation"]\n' - 'print(precipitation)\n') - ret = fetch_acc_dict(keys) - self.assertTrue(ret, "Didn't give message, returned {} instead".format(ret)) - - self.to_source('import weather\n' - 'weather_reports = weather.get_weather()\n' - 'total_precipitation_Chicago = 0\n' - 'total_precipitation_Chicago\n' - 'for report in weather_reports:\n' - ' precip = report["Data"]["Precipitation"]\n' - ' chicago_precip = precip["Chicago"]\n' - ' total_precipitation = total_precipitation + chicago_recip\n' - 'print (total_precipitation)\n') - ret = fetch_acc_dict(keys) - 
self.assertFalse(ret, "Expected False, got {} instead".format(ret)) - - def test_app_assign(self): - self.to_source('import weather\n' - 'import matplotlib.pyplot as plt\n' - 'weather_reports = weather.get_weather()\n' - 'sum = []\n' - 'for rain in weather_reports:\n' - ' if rain["Station"]["City"] == "Chicago":\n' - ' sum = sum.append(rain["Data"]["Precipitation"])\n' - 'plt.plot(sum)\n' - 'plt.xlabel("Years")\n' - 'plt.ylabel("Precipitation")\n' - 'plt.title("Chicago Rain")\n' - 'plt.show(sum)\n') - ret = app_assign() - self.assertTrue(ret, "Didn't give message, returned {} instead".format(ret)) - - self.to_source('import weather\n' - 'weather_reports = weather.get_weather()\n' - 'total_precipitation_Chicago = 0\n' - 'total_precipitation_Chicago\n' - 'for report in weather_reports:\n' - ' precip = report["Data"]["Precipitation"]\n' - ' chicago_precip = precip["Chicago"]\n' - ' total_precipitation = total_precipitation + chicago_recip\n' - 'print (total_precipitation)\n') - ret = app_assign() - self.assertFalse(ret, "Expected False, got {} instead".format(ret)) - - def test_show_args(self): - self.to_source('import weather\n' - 'import matplotlib.pyplot as plt\n' - 'weather_reports = weather.get_weather()\n' - 'sum = []\n' - 'for rain in weather_reports:\n' - ' if rain["Station"]["City"] == "Chicago":\n' - ' sum.append(rain["Data"]["Precipitation"])\n' - 'plt.plot(sum)\n' - 'plt.xlabel("Years")\n' - 'plt.ylabel("Precipitation")\n' - 'plt.title("Chicago Rain")\n' - 'plt.show(sum)\n') - ret = show_args() - self.assertTrue(ret, "Didn't give message, returned {} instead".format(ret)) - - self.to_source('import weather\n' - 'import matplotlib.pyplot as plt\n' - 'weather_reports = weather.get_weather()\n' - 'sum = []\n' - 'for rain in weather_reports:\n' - ' if rain["Station"]["City"] == "Chicago":\n' - ' sum.append(rain["Data"]["Precipitation"])\n' - 'plt.plot(sum)\n' - 'plt.xlabel("Years")\n' - 'plt.ylabel("Precipitation")\n' - 'plt.title("Chicago Rain")\n' - 'plt.show()\n') - ret = show_args() - self.assertFalse(ret, "Expected False, got {} instead".format(ret)) - - def test_dict_plot(self): - self.to_source('import weather\n' - 'import matplotlib.pyplot as plt\n' - 'weather_reports = {}\n' - 'sum = []\n' - 'for rain in weather_reports:\n' - ' if rain["Station"]["City"] == "Chicago":\n' - ' sum.append(rain["Data"]["Precipitation"])\n' - 'plt.plot(weather_reports)\n' - 'plt.xlabel("Years")\n' - 'plt.ylabel("Precipitation")\n' - 'plt.title("Chicago Rain")\n' - 'plt.show()\n'.format(self._dict_str)) - ret = dict_plot() - self.assertTrue(ret, "Didn't give message, returned {} instead".format(ret)) - - self.to_source('import weather\n' - 'import matplotlib.pyplot as plt\n' - 'weather_reports = weather.get_weather()\n' - 'cityWeather = input("Choose a city: ")\n' - 'cityPrecip = []\n' - '# add other input and variable initializations here\n' - '# Put here the code to create the list of data to be plotted.\n' - 'for weather in weather_reports:\n' - ' if weather["Station"]["City"] == cityWeather:\n' - ' cityPrecip.append(weather["Data"]["Precipitation"])\n' - '# Put here the code to display a properly labelled line plot of the list of data.\n' - 'plt.plot(cityPrecip)\n' - 'plt.title(cityWeather)\n' - 'plt.xlabel("Trend")\n' - 'plt.ylabel("Amount of Precipitation")\n' - 'plt.show()\n') - ret = dict_plot() - self.assertFalse(ret, "Expected False, got {} instead".format(ret)) - - self.to_source('import weather\n' - 'import matplotlib.pyplot as plt\n' - 'weather_reports = weather.get_weather()\n' - 'sum 
= []\n' - 'for rain in weather_reports:\n' - ' if rain["Station"]["City"] == "Chicago":\n' - ' sum.append(rain["Data"]["Precipitation"])\n' - 'plt.plot(sum)\n' - 'plt.xlabel("Years")\n' - 'plt.ylabel("Precipitation")\n' - 'plt.title("Chicago Rain")\n' - 'plt.show()\n') - ret = dict_plot() - self.assertFalse(ret, "Expected False, got {} instead".format(ret)) - - self.to_source('import classics\n' - 'report_list = classics.get_books(test=True)\n' - 'for report in report_list:\n' - ' hist = report["bibliography"]["type"]\n' - ' if hist == "Text":\n' - ' list.append("Text")\n' - 'plt.hist(list)\n' - 'plt.x("test")\n' - 'plt.y("test")\n' - 'plt.title(list)\n') - ret = dict_plot() - self.assertFalse(ret, "Didn't give message returned {} instead".format(ret)) - - def test_comp_in_dict_acc(self): - self.to_source('import weather\n' - 'weather_reports = weather.get_weather()\n' - 'total_precipitation_Chicago = 0\n' - 'for report in weather_reports:\n' - ' if report["Station"]["City" == "Chicago"]:\n' - ' total_precipitation_Chicago = total_precipitation_Chicago + ' - 'report["Data"]["Precipitation"]\n' - 'print(total_precipitation_Chicago)\n') - ret = comp_in_dict_acc() - self.assertTrue(ret, "Didn't give message, returned {} instead".format(ret)) - - self.to_source('import weather\n' - 'import matplotlib.pyplot as plt\n' - 'weather_reports = weather.get_weather()\n' - 'sum = []\n' - 'for rain in weather_reports:\n' - ' if rain["Station"]["City"] == "Chicago":\n' - ' sum.append(rain["Data"]["Precipitation"])\n' - 'plt.plot(sum)\n' - 'plt.xlabel("Years")\n' - 'plt.ylabel("Precipitation")\n' - 'plt.title("Chicago Rain")\n' - 'plt.show()\n') - ret = dict_plot() - self.assertFalse(ret, "Expected False, got {} instead".format(ret)) - - def test_general_testing(self): - self.to_source('print("fun")') - matches = find_matches("_var_") - var = matches[0]["_var_"] - self.assertTrue(var.ast_name == "Name", "is: {}".format(var.ast_name)) - - def test_group(self): - self.to_source("earthquake_report = [{'Location' : 'California', 'Magnitude' : 2.3, 'Depth' : 7.66},\n" - " {'Location' : 'Japan', 'Magnitude' : 5.3, 'Depth' : 3.34},\n" - " {'Location' : 'Burma', 'Magnitude' : 4.9, 'Depth' :97.07},\n" - " {'Location' : 'Alaska', 'Magnitude' : 4.6, 'Depth' : 35.0},\n" - " {'Location' : 'Washington', 'Magnitude' : 2.19, 'Depth' : 15.28},\n" - " {'Location' : 'China', 'Magnitude' : 4.3, 'Depth' : 10.0}\n" - " ]\n" - "total = 0\n" - "number = 0\n" - "for earthquake_report in earthquake_reports:\n" - " total = total + earthquake_report['Magnitude']\n" - " number = 1 + number\n" - "average = total / number\n" - "print(average)" - ) - target_dict = ('_quake_dict_list_ = [{"Location": "California", "Magnitude": 2.3, "Depth": 7.66},' - '{"Location": "Japan", "Magnitude": 5.3, "Depth": 3.34},' - '{"Location": "Burma", "Magnitude": 4.9, "Depth": 97.07},' - '{"Location": "Alaska", "Magnitude": 4.6, "Depth": 35.0},' - '{"Location": "Washington", "Magnitude": 2.19, "Depth": 15.28},' - '{"Location": "China", "Magnitude": 4.3, "Depth": 10.0}]') - matches = find_matches(target_dict) - if not matches: - explain_r("You need to properly define a dictionary for the abstraction first", "dict_def_err", - label="Dictionary Definition Incorrect") - - all_keys = ["Location", "Magnitude", "Depth"] - unused_keys = ["Location", "Depth"] - used_keys = ["Magnitude"] - dict_acc_group(all_keys, unused_keys, used_keys) - dict_list_group(all_keys) - - target_list = [2.3, 5.3, 4.9, 4.6, 2.19, 4.3] - ___target_avg = sum(target_list) / 
len(target_list) - - prevent_literal(___target_avg, str(___target_avg)) - - (success, score, category, label, - message, data, hide) = simple.resolve() - # self.assertFalse(success) - # self.assertEqual(message, 'You should always create unit tests.') - self.assertEqual(message, "The list of Dictionaries earthquake_report is not itself a dictionary. " - "To access key-value pairs of the dictionaries in the list, you need to access each " - "dictionary in the list one at a time.

    (list_dict)

    ") diff --git a/src/lib/pedal/__init__.py b/src/lib/pedal/__init__.py deleted file mode 100644 index 27602eb32e..0000000000 --- a/src/lib/pedal/__init__.py +++ /dev/null @@ -1,24 +0,0 @@ -""" -A package for analyzing student code. -""" - -# Probably want to import useful stuff from: -# report -# source -# sandbox -# tifa -# cait -# resolver -# etc. - -from pedal.cait import (find_match, find_matches, - parse_program, - find_submatches, find_expr_sub_matches, - def_use_error, data_state, data_type, - expire_cait_cache) -from pedal.report.imperative import (suppress, explain, compliment, - give_partial, gently, set_success) -from pedal.sandbox.sandbox import run, reset -from pedal.tifa import tifa_analysis -from pedal.source import (set_source, check_section_exists, next_section, - set_source_file) diff --git a/src/lib/pedal/assertions/__init__.py b/src/lib/pedal/assertions/__init__.py deleted file mode 100644 index cbb49e6c91..0000000000 --- a/src/lib/pedal/assertions/__init__.py +++ /dev/null @@ -1,13 +0,0 @@ -from pedal.report.imperative import MAIN_REPORT - -from pedal.assertions.setup import _setup_assertions, resolve_all -from pedal.assertions.assertions import * -from pedal.assertions.organizers import * - - -def set_assertion_mode(exceptions=True, report=None): - if report is None: - report = MAIN_REPORT - _setup_assertions(report) - - report['assertions']['exceptions'] = exceptions diff --git a/src/lib/pedal/assertions/assertions.py b/src/lib/pedal/assertions/assertions.py deleted file mode 100644 index 5d7d3c0c33..0000000000 --- a/src/lib/pedal/assertions/assertions.py +++ /dev/null @@ -1,625 +0,0 @@ -import string -import re - -from pedal.report.imperative import MAIN_REPORT -from pedal.sandbox.result import SandboxResult -from pedal.sandbox.exceptions import SandboxException -from pedal.sandbox.sandbox import DataSandbox -from pedal.assertions.setup import _setup_assertions, AssertionException -from pedal.assertions.tests import _normalize_string, strip_punctuation, equality_test, output_test - - -# TODO: Allow bundling of assertions to make a table - - -def iterable(obj): - return hasattr(obj, '__iter__') or hasattr(obj, '__getitem__') - - -DELTA = .001 -_MAX_LENGTH = 80 - - -def _escape_curly_braces(result): - return result.replace("{", "{{").replace("}", "}}") - - -def safe_repr(obj, short=False): - try: - result = repr(obj) - except Exception: - result = object.__repr__(obj) - if short and len(result) >= _MAX_LENGTH: - result = result[:_MAX_LENGTH] + ' [truncated]...' 
- result = result - return result - - -def _fail(code_message, actual_message, expected_message, - show_expected_value, modify_right, *values): - normal_values = [] - sandboxed_values = [] - sandboxed_results = [] - if modify_right and values: - values = values[:-1] + (modify_right(values[-1]),) - for value in values: - if is_sandbox_result(value): - sandboxed_results.append(value) - value = value._actual_value - sandboxed_values.append(safe_repr(value)) - else: - normal_values.append(safe_repr(value)) - if sandboxed_results: - code_message = _build_context(sandboxed_results, actual_message, - expected_message, show_expected_value) - return AssertionException(code_message.format(*(sandboxed_values + normal_values))) - - -def _build_result_from_target(target, index, quantity): - if target == "_": - if quantity == 1: - return "the result" - elif index == 0: - return "the first result" - else: - return "the second result" - return "" + target + "" - - -def _build_context(sandboxed_results, actual_message, expected_message, - show_expected_value): - context = [] - calls = [] - inputs = [] - outputs = [] - targets = [] - for result in sandboxed_results: - # Look up info - call_id = result._actual_call_id - sandbox = result._actual_sandbox - outputs.extend(sandbox.output_contexts[call_id]) - calls.extend(sandbox.call_contexts[call_id]) - inputs.extend(sandbox.input_contexts[call_id]) - targets.append(sandbox.target_contexts[call_id]) - # Actual rendering of text - if calls: - calls = [_escape_curly_braces(str(call)) for call in calls] - context.append("I ran:\n
    " + "\n".join(calls) + "
    ") - if inputs: - inputs = [_escape_curly_braces(str(inp)) for inp in inputs] - context.append("I entered as input:\n
    " + "\n".join(inputs) + "
    ") - actual_message += ":\n
    {}
    " - for i, target in enumerate(targets): - named_target = _build_result_from_target(target, i, len(targets)) - if target == '_': - context.append(named_target.capitalize() + " " + actual_message) - else: - context.append("The value of " + named_target + " " + actual_message) - expected_context = "But I expected " - if len(targets) == 2: - expected_context += _build_result_from_target(targets[0], 0, 2) - expected_context += " " + expected_message + " " - expected_context += _build_result_from_target(targets[1], 1, 2) - else: - expected_context += _build_result_from_target(targets[0], 0, 1) - expected_context += " " + expected_message - if show_expected_value: - expected_context += ":\n
    {}
    " - context.append(expected_context) - return "\n".join(context) - - -def is_sandbox_result(value): - if hasattr(value, "__actual_class__"): - if value.__actual_class__ == SandboxResult: - return True - return False - - -def _basic_assertion(left, right, operator, code_comparison_message, - hc_message, hc_message_past, message, report, contextualize, - show_expected_value=True, modify_right=None): - if report is None: - report = MAIN_REPORT - _setup_assertions(report) - context = "" - if message: - message = "\n" + message - else: - message = "" - # TODO: Handle right-side sandbox result - # if is_sandbox_result(right): - # right = right._actual_value - if isinstance(left, Exception): - return False - if isinstance(right, Exception): - return False - if not operator(left, right): - failure = _fail(code_comparison_message, hc_message, hc_message_past, - show_expected_value, modify_right, left, right) - report['assertions']['collected'].append(failure) - report.attach('Instructor Test', category='student', tool='Assertions', - mistake={'message': "Student code failed instructor test.
    \n" + - context + str(failure) + message}) - report['assertions']['failures'] += 1 - if report['assertions']['exceptions']: - raise failure - else: - return False - return True - - -PRE_VAL = "" - - -def assertEqual(left, right, score=None, message=None, report=None, - contextualize=True, exact=False, compare_lengths=None): - if compare_lengths is None: - compare_lengths = (iterable(left) and isinstance(right, (int, float))) - if _basic_assertion(left, right, - lambda l, r: - equality_test(len(l), r, exact, DELTA) if - compare_lengths else - equality_test(l, r, exact, DELTA), - "len({}) != {}" if compare_lengths else "{} != {}", - "was" + PRE_VAL, - "to have its length equal to" - if compare_lengths else "to be equal to", - message, report, contextualize): - if report is None: - report = MAIN_REPORT - report.give_partial(score) - return True - return False - - -assert_equal = assertEqual - - -def assertNotEqual(left, right, score=None, message=None, report=None, - contextualize=True, exact=False): - if _basic_assertion(left, right, - lambda l, r: not equality_test(l, r, exact, DELTA), - "{} == {}", - "was" + PRE_VAL, - "to not be equal to", - message, report, contextualize): - if report is None: - report = MAIN_REPORT - report.give_partial(score) - return True - return False - - -def assertTrue(something, score=None, message=None, report=None, - contextualize=True): - if _basic_assertion(something, True, - lambda l, r: bool(l), - "{} is true", - "was false" + PRE_VAL, - "to be true", - message, report, contextualize, - show_expected_value=False): - if report is None: - report = MAIN_REPORT - report.give_partial(score) - return True - return False - - -def assertFalse(something, score=None, message=None, report=None, - contextualize=True): - if _basic_assertion(something, False, - lambda l, r: not bool(l), - "{} is false", - "was true" + PRE_VAL, - "to be false", - message, report, contextualize, - show_expected_value=False): - if report is None: - report = MAIN_REPORT - report.give_partial(score) - return True - return False - - -def assertIs(left, right, score=None, message=None): - pass - - -def assertIsNot(left, right, score=None, message=None): - pass - - -def _actually_is_none(l, r): - if is_sandbox_result(l): - return l._actual_value is None - return l is None - - -def assertIsNone(something, score=None, message=None, report=None, - contextualize=True): - if _basic_assertion(something, None, - _actually_is_none, - "{} is none", - "was" + PRE_VAL, - "to be none", - message, report, contextualize, - show_expected_value=False): - if report is None: - report = MAIN_REPORT - report.give_partial(score) - return True - return False - - -def _actually_is_not_none(l, r): - if is_sandbox_result(l): - return l._actual_value is not None - return l is not None - - -def assertIsNotNone(something, score=None, message=None, report=None, - contextualize=True): - if _basic_assertion(something, None, - _actually_is_not_none, - "{} is not none", - "was" + PRE_VAL, - "to not be none", - message, report, contextualize, - show_expected_value=False): - if report is None: - report = MAIN_REPORT - report.give_partial(score) - return True - return False - - -def assertIn(needle, haystack, score=None, message=None, report=None, - contextualize=True): - expected_message = "to be in" - if not is_sandbox_result(needle) and is_sandbox_result(haystack): - expected_message = "to contain" - if _basic_assertion(needle, haystack, - lambda n, h: n in h, - "{} not in {}", - "was" + PRE_VAL, - expected_message, - 
message, report, contextualize): - if report is None: - report = MAIN_REPORT - report.give_partial(score) - return True - return False - - -def assertNotIn(needle, haystack, score=None, message=None, report=None, - contextualize=True): - expected_message = "to not be in" - if not is_sandbox_result(needle) and is_sandbox_result(haystack): - expected_message = "to not contain" - if _basic_assertion(needle, haystack, - lambda n, h: n not in h, - "{} in {}", - "was" + PRE_VAL, - expected_message, - message, report, contextualize): - if report is None: - report = MAIN_REPORT - report.give_partial(score) - return True - return False - - -def _humanize_type(t): - if hasattr(t, '__name__'): - return t.__name__ - else: - return str(t) - - -def _humanize_types(types): - if isinstance(types, tuple): - return ', '.join(_humanize_type(t) for t in types) - return _humanize_type(types) - - -def assertIsInstance(value, types, score=None, message=None, report=None, - contextualize=True): - if _basic_assertion(value, types, - lambda v, t: isinstance(v, t), - "isinstance({}, {})", - "was" + PRE_VAL, - "to be of type", - message, report, contextualize, - modify_right=_humanize_types): - if report is None: - report = MAIN_REPORT - report.give_partial(score) - return True - return False - - -def assertNotIsInstance(value, types): - pass - - -def assertRaises(exception): - pass - - -def assertRaisesRegexp(exception): - pass - - -def assertAlmostEqual(left, right): - pass - - -def assertNotAlmostEqual(left, right): - pass - - -def assertGreater(left, right, score=None, message=None, report=None, - contextualize=True, compare_lengths=None): - if compare_lengths is None: - compare_lengths = (iterable(left) and isinstance(right, (int, float))) - if _basic_assertion(left, right, - lambda l, r: - len(l) > r if - compare_lengths else - l > r, - "len({}) <= {}" if compare_lengths else "{} <= {}", - "was" + PRE_VAL, - "to have its length greater than" - if compare_lengths else - "to be greater than", - message, report, contextualize): - if report is None: - report = MAIN_REPORT - report.give_partial(score) - return True - return False - - -def assertGreaterEqual(left, right, score=None, message=None, report=None, - contextualize=True, compare_lengths=None): - if compare_lengths is None: - compare_lengths = (iterable(left) and isinstance(right, (int, float))) - if _basic_assertion(left, right, - lambda l, r: - len(l) >= r if - compare_lengths else - l >= r, - "len({}) < {}" if compare_lengths else "{} < {}", - "was" + PRE_VAL, - "to have its length greater than or equal to" if compare_lengths else - "to be greater than or equal to", - message, report, contextualize): - if report is None: - report = MAIN_REPORT - report.give_partial(score) - return True - return False - - -def assertLess(left, right, score=None, message=None, report=None, - contextualize=True, compare_lengths=None): - if compare_lengths is None: - compare_lengths = (iterable(left) and isinstance(right, (int, float))) - if _basic_assertion(left, right, - lambda l, r: - len(l) < r if - compare_lengths else - l < r, - "len({}) >= {}" if compare_lengths else "{} >= {}", - "was" + PRE_VAL, - "to have its length less than" - if compare_lengths else - "to be less than", - message, report, contextualize): - if report is None: - report = MAIN_REPORT - report.give_partial(score) - return True - return False - - -def assertLessEqual(left, right, score=None, message=None, report=None, - contextualize=True, compare_lengths=None): - if compare_lengths is None: - 
compare_lengths = (iterable(left) and isinstance(right, (int, float))) - if _basic_assertion(left, right, - lambda l, r: - len(l) <= r if - compare_lengths else - l <= r, - "len({}) > {}" if compare_lengths else "{} > {}", - "was" + PRE_VAL, - "to have its length less than or equal to" if compare_lengths else - "to be less than or equal to", - message, report, contextualize): - if report is None: - report = MAIN_REPORT - report.give_partial(score) - return True - return False - - -def assertRegexpMatches(text, pattern): - pass - - -def assertNotRegexpMatches(text, pattern): - pass - - -def assertItemsEqual(left, right): - pass - - -def assertDictContainsSubset(left, right): - pass - - -def assertMultiLineEqual(left, right): - pass - - -def assertSequenceEqual(left, right): - pass - - -# TODO: assertPrintIncludes - -# Speciality Asserts -def assertPrints(result, expected_output, args=None, returns=None, - score=None, message=None, report=None, - contextualize=True, exact=False): - if not isinstance(result, SandboxResult): - return False - raise TypeError("You must pass in a SandboxResult (e.g., using `call`) to assertPrints") - if report is None: - report = MAIN_REPORT - _setup_assertions(report) - call_id = result._actual_call_id - sandbox = result._actual_sandbox - calls = sandbox.call_contexts[call_id] - inputs = sandbox.input_contexts[call_id] - actual_output = sandbox.output_contexts[call_id] - if not output_test(actual_output, expected_output, exact): - context = [] - if calls: - context.append("I ran:\n
    " +
    -                           "\n".join(map(str, calls)) +
    -                           "
    ") - if inputs: - context.append("I entered as input:\n
    " +
    -                           "\n".join(map(str, inputs)) +
    -                           "
    ") - if actual_output: - context.append("The function printed:\n
    " +
    -                           "\n".join(map(str, actual_output)) +
    -                           "
    ") - else: - context.append("The function printed nothing.") - context.append("But I expected the output:\n
    " + "\n".join(map(str, expected_output)) + "
    ") - failure = AssertionException("\n".join(context)) - report['assertions']['collected'].append(failure) - report.attach('Instructor Test', category='student', tool='Assertions', - mistake={'message': "Student code failed instructor test.
    \n" + - str(failure)}) - report['assertions']['failures'] += 1 - if report['assertions']['exceptions']: - raise failure - else: - return False - report.give_partial(score) - return True - - -def assertHasFunction(obj, function, args=None, returns=None, - score=None, message=None, report=None, - contextualize=True, exact=False): - # If object is a sandbox, will check the .data[variable] attribute - # Otherwise, check it directly - if isinstance(obj, DataSandbox): - comparison = lambda o, f: f in o.data - else: - def comparison(o, f): - try: - return f in o - except: - return hasattr(o, f) - if not _basic_assertion(obj, function, - comparison, - "Could not find function {}{}", - "was" + PRE_VAL, - "to have the function", - message, report, contextualize): - return False - if isinstance(obj, DataSandbox): - student_function = obj.data[function] - else: - try: - student_function = obj[function] - except: - student_function = getattr(obj, function) - if _basic_assertion(student_function, function, - lambda l, r: callable(l), - "The value {} is in the variable {}, and that value is not a callable function.", - "was callable" + PRE_VAL, - "to be callable", - message, report, contextualize, - show_expected_value=False): - if report is None: - report = MAIN_REPORT - report.give_partial(score) - return True - return False - - -def assertHasClass(sandbox, class_name, attrs=None): - pass - - -def assertHas(obj, variable, types=None, value=None, score=None, - message=None, report=None, contextualize=True): - # If object is a sandbox, will check the .data[variable] attribute - # Otherwise, check it directly - if isinstance(obj, DataSandbox): - comparison = lambda o, v: v in o.data - else: - comparison = lambda o, v: v in hasattr(o, v) - if not _basic_assertion(obj, variable, - comparison, - "Could not find variable {}{}", - "was" + PRE_VAL, - "to have the variable", - message, report, contextualize): - return False - if isinstance(obj, DataSandbox): - student_variable = obj.data[variable] - else: - student_variable = getattr(obj, variable) - if types is not None: - if not _basic_assertion(student_variable, types, - lambda v, t: isinstance(v, t), - "isinstance({}, {})", - "was" + PRE_VAL, - "to be of type", - message, report, contextualize, - modify_right=_humanize_types): - return False - if value is not None: - if not _basic_assertion(student_variable, value, - lambda l, r: equality_test(l, r, False, DELTA), - "{} != {}", - "was" + PRE_VAL, - "to be equal to", - message, report, contextualize, - show_expected_value=False): - return False - if report is None: - report = MAIN_REPORT - report.give_partial(score) - return True - - -def assertGenerally(expression, score=None, message=None, report=None, - contextualize=True): - if report is None: - report = MAIN_REPORT - _setup_assertions(report) - if expression: - report.give_partial(score) - return True - else: - report['assertions']['failures'] += 1 - if report['assertions']['exceptions']: - raise AssertionException("General assertion") - else: - return False - -# Allow addition of new assertions -# e.g., assertGraphType, assertGraphValues diff --git a/src/lib/pedal/assertions/organizers.py b/src/lib/pedal/assertions/organizers.py deleted file mode 100644 index 84f00667c4..0000000000 --- a/src/lib/pedal/assertions/organizers.py +++ /dev/null @@ -1,167 +0,0 @@ -''' - -Sections are a way to separate the pieces of a file such that the pieces do not -interfere with each other. - -Phases are a way to chunk a collection of functions together. 
If one of these -functions fails, the other functions in the phase will continue to be evaluated. -However, that phase will still have failed. You can establish that one phase -comes before or after another phase; if a precondition phase fails, then the -subsequent phase will not run. - -Example: - Students are working on a text adventure game and have to implement a - function named create_world(). The grading for portion of the assignment - has three phases: - 'create_world_exists' which confirms that the function was defined - 'create_world_returns' which confirms that calling the function - produces the right result. - 'create_world_complete' which confirms that the previous phase - terminated in order to give some partial credit. - - Although the 'create_world_exists' phase is composed of one function, the - 'create_world_returns' phase is actually composed of several functions that - check the components of the function. - - @phase('create_world_exists') - - @phase('create_world_returns', after='create_world_exists') - -Phases are reset between sections. - -''' - -from pedal.report.imperative import MAIN_REPORT -from pedal.assertions.setup import (_setup_assertions, AssertionException, - _add_relationships, _add_phase) -from functools import wraps - - -def contextualize_calls(): - pass - - -class _finish_section: - def __init__(self, number, *functions): - if isinstance(number, int): - self.number = number - else: - self.number = -1 - functions = [number] + list(functions) - self.functions = functions - for function in functions: - self(function, False) - - def __call__(self, f=None, quiet=True): - if f is not None: - f() - if quiet: - print("\tNEXT SECTION") - - def __enter__(self): - pass - - def __exit__(self, x, y, z): - print("\tNEXT SECTION") - # return wrapped_f - - -def finish_section(number, *functions, next_section=False): - if len(functions) == 0: - x = _finish_section(number, *functions) - x() - else: - result = _finish_section(number, *functions) - if next_section: - print("\tNEXT SECTION") - return result - - -def section(*args): - ''' - TODO: Deprecate? - ''' - _setup_assertions(MAIN_REPORT) - - def wrap(f): - _add_phase(phase_name, _handle_entry) - MAIN_REPORT['assertions']['phases'].append((section_number, f)) - return f - - section_number = -1 - if len(args) >= 1 and callable(args[0]): - if len(args) >= 2: - section_number = args[1] - return wrap(args[0]) - elif len(args) >= 1: - section_number = args[0] - return wrap - - -def phase(phase_name, before=None, after=None): - ''' - - Args: - phase_name (str): The name of the phase this function will belong to. - before (list[str] or str): the name(s) of any phases that this phase - should be before. - after (list[str] or str): the name(s) of any phases that this phase - should be after. 
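    Example (an illustrative sketch, not part of the original module: the
    checker bodies, the grading goal, and the `call` helper used to fetch the
    student function's result are assumptions; `run`, `assertHasFunction`, and
    `assertIsNotNone` come from the pedal modules shown earlier in this diff):

        @phase('create_world_exists')
        def check_definition():
            # run() is assumed to return the student sandbox (a DataSandbox)
            assertHasFunction(run(), 'create_world')

        @phase('create_world_returns', after='create_world_exists')
        def check_return_value():
            # skipped automatically if the 'create_world_exists' phase failed
            assertIsNotNone(call('create_world'))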
- ''' - _setup_assertions(MAIN_REPORT) - - def wrap(f): - @wraps(f) - def _handle_entry(*args, **kwargs): - old_exception_state = MAIN_REPORT['assertions']['exceptions'] - MAIN_REPORT['assertions']['exceptions'] = True - value = f(*args, **kwargs) - MAIN_REPORT['assertions']['exceptions'] = old_exception_state - return value - - _add_phase(phase_name, _handle_entry) - _add_relationships(phase_name, before) - _add_relationships(after, phase_name) - return _handle_entry - - return wrap - - -def stop_on_failure(f): - _setup_assertions(MAIN_REPORT) - - @wraps(f) - def wrapped(*args, **kwargs): - old_exception_state = MAIN_REPORT['assertions']['exceptions'] - MAIN_REPORT['assertions']['exceptions'] = True - value = None - try: - value = f(*args, **kwargs) - except AssertionException: - pass - MAIN_REPORT['assertions']['exceptions'] = old_exception_state - return value - - return wrapped - - -def try_all(): - _setup_assertions(MAIN_REPORT) - - @wraps(f) - def wrapped(*args, **kwargs): - old_exception_state = MAIN_REPORT['assertions']['exceptions'] - MAIN_REPORT['assertions']['exceptions'] = False - value = f(*args, **kwargs) - MAIN_REPORT['assertions']['exceptions'] = old_exception_state - return value - - return wrapped - - -def precondition(function): - pass - - -def postcondition(function): - pass diff --git a/src/lib/pedal/assertions/setup.py b/src/lib/pedal/assertions/setup.py deleted file mode 100644 index 5df3a598cc..0000000000 --- a/src/lib/pedal/assertions/setup.py +++ /dev/null @@ -1,116 +0,0 @@ -import sys - -from pedal.report.imperative import MAIN_REPORT -from pedal.sandbox.exceptions import SandboxStudentCodeException - - -class AssertionException(Exception): - def __str__(self): - return self.args[0] - - -def _topological_sort(names, orderings): - visited = set() - stack = [] - - def dfs(name): - visited.add(name) - if name in orderings: - for neighbor in orderings[name]: - if neighbor not in visited: - dfs(neighbor) - stack.insert(0, name) - - for name in names[::-1]: - if name not in visited: - dfs(name) - return stack - - -def resolve_all(set_success=False, report=None): - from pprint import pprint - if report is None: - report = MAIN_REPORT - _setup_assertions(report) - orderings = report['assertions']['relationships'] - phase_functions = report['assertions']['phase_functions'] - phase_names = report['assertions']['phases'] - phase_names = _topological_sort(phase_names, orderings) - # pprint(orderings) - phase_success = False - for phase_name in phase_names: - phase_success = True - for function in phase_functions[phase_name]: - try: - phase_success = phase_success and (function() is not False) - except AssertionException: - phase_success = False - except SandboxStudentCodeException: - phase_success = False - if not phase_success: - break - - # for f in report.feedback: - # print("\t", f, f.mistake, f.misconception) - if not report['assertions']['failures'] and phase_success and set_success: - report.set_success() - - _reset_phases(report) - - -def _add_phase(phase_name, function, report=None): - if report is None: - report = MAIN_REPORT - phase_functions = report['assertions']['phase_functions'] - phases = report['assertions']['phases'] - if phase_name not in phase_functions: - phase_functions[phase_name] = [] - phases.append(phase_name) - phase_functions[phase_name].append(function) - - -def _add_relationships(befores, afters, report=None): - if report is None: - report = MAIN_REPORT - relationships = report['assertions']['relationships'] - if None in (befores, afters): - 
return - if not isinstance(befores, (list, tuple)): - befores = [befores] - if not isinstance(afters, (list, tuple)): - afters = [afters] - for before in befores: - if not isinstance(before, str): - before = before.__name__ - if before not in relationships: - relationships[before] = [] - for after in afters: - if not isinstance(after, str): - after = after.__name__ - relationships[before].append(after) - - -def _reset_phases(report=None): - if report is None: - report = MAIN_REPORT - report['assertions']['relationships'].clear() - report['assertions']['phases'].clear() - report['assertions']['phase_functions'].clear() - report['assertions']['failures'] = 0 - - -def _setup_assertions(report): - if 'assertions' not in report: - report['assertions'] = { - 'phases': [], - 'phase_functions': {}, - 'relationships': {}, - 'exceptions': False, - 'failures': 0, - 'collected': [], - # Should we batch up multiple assertion failures? - # The grouping mechanism is try_all - 'tabular_output': False, - } - report.add_hook('source.next_section.before', resolve_all) - report.add_hook('pedal.resolvers.resolve', resolve_all) diff --git a/src/lib/pedal/assertions/tests.py b/src/lib/pedal/assertions/tests.py deleted file mode 100644 index aa8bfef6b5..0000000000 --- a/src/lib/pedal/assertions/tests.py +++ /dev/null @@ -1,147 +0,0 @@ -import string -import re - -# Number encapsulates bool, int, float, complex, decimal.Decimal, etc. -try: - from numbers import Number -except: - Number = (bool, int, float, complex) - -try: - bytes -except NameError: - bytes = str - -try: - frozenset() -except: - frozenset = tuple() - -try: - punctuation_table = str.maketrans(string.punctuation, ' ' * len(string.punctuation)) -except AttributeError: - punctuation_table = None - -if punctuation_table is None: - def strip_punctuation(a_string): - return ''.join(ch for ch in a_string if ch not in set(string.punctuation)) -else: - def strip_punctuation(a_string): - return a_string.translate(punctuation_table) - -SET_GENERATOR_TYPES = (type({}.keys()), type({}.values()), type({}.items())) - -LIST_GENERATOR_TYPES = (type(map(bool, [])), type(filter(bool, [])), - type(range(0)), type(reversed([])), type(zip()), - type(enumerate([]))) - - -def _normalize_string(a_string, numeric_endings=False): - # Lower case - a_string = a_string.lower() - # Remove trailing decimals (TODO: How awful!) 
- if numeric_endings: - a_string = re.sub(r"(\s*[0-9]+)\.[0-9]+(\s*)", r"\1\2", a_string) - # Remove punctuation - a_string = strip_punctuation(a_string) - # Split lines - lines = a_string.split("\n") - normalized = [[piece for piece in line.split()] - for line in lines] - normalized = [[piece for piece in line if piece] - for line in normalized if line] - return sorted(normalized) - - -def output_test(actual, expected, _exact_strings): - normalized_actual = [_normalize_string(line) for line in actual] - if isinstance(expected, (str, bytes)): - return _normalize_string(expected) in normalized_actual - else: - normalized_expected = [_normalize_string(line) for line in expected] - return all(each_actual in normalized_expected for each_actual in normalized_actual) - - -def equality_test(actual, expected, _exact_strings, _delta): - # Check if generators - if isinstance(expected, LIST_GENERATOR_TYPES): - expected = list(expected) - elif isinstance(expected, SET_GENERATOR_TYPES): - expected = set(expected) - if isinstance(actual, LIST_GENERATOR_TYPES): - actual = list(actual) - elif isinstance(actual, SET_GENERATOR_TYPES): - actual = set(actual) - - # Float comparison - if isinstance(expected, float) and isinstance(actual, (float, int)): - error = 10 ** (-_delta) - return abs(expected - actual) < error - # Other numerics - elif isinstance(expected, Number) and isinstance(actual, Number) and isinstance(expected, type(actual)): - return expected == actual - # String comparisons - elif ((isinstance(expected, str) and isinstance(actual, str)) or - (isinstance(expected, bytes) and isinstance(actual, bytes))): - if _exact_strings: - return expected == actual - else: - return _normalize_string(expected) == _normalize_string(actual) - # Exact Comparison - elif actual == expected: - return True - # Sequence comparisons - elif isinstance(expected, list) and isinstance(actual, list): - return _are_sequences_equal(actual, expected, _exact_strings, _delta) - elif isinstance(expected, tuple) and isinstance(actual, tuple): - return _are_sequences_equal(actual, expected, _exact_strings, _delta) - elif isinstance(expected, set) and isinstance(actual, set): - return _are_sets_equal(actual, expected, _exact_strings, _delta) - elif isinstance(expected, frozenset) and isinstance(actual, frozenset): - return _are_sets_equal(actual, expected, _exact_strings, _delta) - elif isinstance(expected, dict) and isinstance(actual, dict): - primary_keys = set(expected.keys()) - if not _are_sets_equal(primary_keys, set(actual.keys()), _exact_strings, _delta): - return False - for key in primary_keys: - if not equality_test(expected[key], actual[key], _exact_strings, _delta): - return False - return True - # Else - return False - - -def _are_sequences_equal(x, y, _exact_strings, _delta): - """ - For sequences that support __len__, __iter__, and should have the same - order. - """ - if len(x) != len(y): - return False - for x_element, y_element in zip(x, y): - if not equality_test(x_element, y_element, _exact_strings, _delta): - return False - return True - - -def _set_contains(needle, haystack, _exact_strings, _delta): - """ - Tests if the given needle is one of the elements of haystack, using - the _is_equal function. - """ - for element in haystack: - if equality_test(element, needle, _exact_strings, _delta): - return True - return False - - -def _are_sets_equal(x, y, _exact_strings, _delta): - """ - For sequences that support __len__, __iter__, but order does not matter. 
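    Example (illustrative values only): every element of one set must match
    some element of the other under equality_test, so inexact string
    normalization and the 10 ** -_delta float tolerance both apply here:

        _are_sets_equal({1.0001, "Hi!"}, {"hi", 1.0}, False, 3)  # -> True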
- """ - if len(x) != len(y): - return False - for x_element in x: - if not _set_contains(x_element, y, _exact_strings, _delta): - return False - return True diff --git a/src/lib/pedal/cait/__init__.py b/src/lib/pedal/cait/__init__.py deleted file mode 100644 index a01b744ec1..0000000000 --- a/src/lib/pedal/cait/__init__.py +++ /dev/null @@ -1,16 +0,0 @@ -""" -A package of tools for capturing student code by matching it against patterns. -""" - -NAME = 'CAIT' -SHORT_DESCRIPTION = "Captures instructor code patterns within student code." -DESCRIPTION = ''' -''' -REQUIRES = ['Source'] -OPTIONALS = ['TIFA'] - -from pedal.cait.cait_api import (find_match, find_matches, - parse_program, - find_submatches, find_expr_sub_matches, - def_use_error, data_state, data_type, - expire_cait_cache) diff --git a/src/lib/pedal/cait/ast_helpers.py b/src/lib/pedal/cait/ast_helpers.py deleted file mode 100644 index 2adc042b31..0000000000 --- a/src/lib/pedal/cait/ast_helpers.py +++ /dev/null @@ -1,59 +0,0 @@ -""" -A pretty-printing dump function for the ast module. The code was copied from -the ast.dump function and modified slightly to pretty-print. - -Alex Leone (acleone ~AT~ gmail.com), 2010-01-30 - -From http://alexleone.blogspot.co.uk/2010/01/python-ast-pretty-printer.html -""" - -from ast import AST, iter_fields, parse - - -def dump(node, annotate_fields=True, include_attributes=False, indent=' '): - """ - Return a formatted dump of the tree in *node*. This is mainly useful for - debugging purposes. The returned string will show the names and the values - for fields. This makes the code impossible to evaluate, so if evaluation is - wanted *annotate_fields* must be set to False. Attributes such as line - numbers and column offsets are not dumped by default. If this is wanted, - *include_attributes* can be set to True. - """ - - def _format(_node, level=0): - if isinstance(_node, AST): - fields = [(a, _format(b, level)) for a, b in iter_fields(_node)] - if include_attributes and _node._attributes: - fields.extend([(a, _format(getattr(_node, a), level)) - for a in _node._attributes]) - return ''.join([ - _node.__class__.__name__, - '(', - ', '.join(('%s=%s' % field for field in fields) - if annotate_fields else - (b for a, b in fields)), - ')']) - elif isinstance(_node, list): - lines = ['['] - lines.extend((indent * (level + 2) + _format(x, level + 2) + ',' - for x in _node)) - if len(lines) > 1: - lines.append(indent * (level + 1) + ']') - else: - lines[-1] += ']' - return '\n'.join(lines) - return repr(_node) - - if not isinstance(node, AST): - raise TypeError('expected AST, got %r' % node.__class__.__name__) - return _format(node) - - -def parseprint(code, filename="", mode="exec", **kwargs): - """Parse some code from a string and pretty-print it.""" - node = parse(code, mode=mode) # An ode to the code - print(dump(node, **kwargs)) - - -# Short name: pdp = parse, dump, print -pdp = parseprint diff --git a/src/lib/pedal/cait/ast_map.py b/src/lib/pedal/cait/ast_map.py deleted file mode 100644 index 940f54876e..0000000000 --- a/src/lib/pedal/cait/ast_map.py +++ /dev/null @@ -1,275 +0,0 @@ -from pedal.cait.cait_node import CaitNode - - -class AstSymbol: - """ - This represents an Ast symbol, whether it be a variable (name node) or a function name - for place holders used in instructor patterns - - Notes: - Also has the attributes of the relevant Name node from the ast class. 
- - Attributes: - id (str): the name of the variable place holder used by the instructor - ast_node (cait_node): the ast node of the variable - """ - - def __init__(self, _id="", _node=None): - self.id = _id - self.astNode = _node - self.ast_node = _node - - def __getattr__(self, attr): - return getattr(self.astNode, attr) - - def __str__(self): - # return ''.join(["id = ", self.id.__str__(), ", astNode = ", type(self.astNode).__name__]) - return self.id - - def __repr__(self): - return ''.join(["id = ", self.id.__str__(), ", astNode = ", type(self.astNode).__name__]) - - -class AstSymbolList: - """ - This class is a wrapper for a list of AstSymbols for ease of access - If accessed as a list, manipulable as a list, otherwise, acts as the first AstSymbol in the list - """ - - def __init__(self): - self.my_list = [] - - def __getitem__(self, item): - return self.my_list.__getitem__(item) - - def append(self, item): - self.my_list.append(item) - - def __getattr__(self, attr): - return getattr(self.my_list[0], attr) - - def __len__(self): - return self.my_list.__len__() - - -class AstMap: - def __init__(self): - self.mappings = {} - self.symbol_table = {} - self.exp_table = {} - self.func_table = {} - self.conflict_keys = [] - self.match_root = None - self.diagnosis = "" - - def add_func_to_sym_table(self, ins_node, std_node): - """ - Adds ins_node.name to the symbol table if it doesn't already exist, mapping it to a set of ins_node. Updates a - second dictionary that maps ins_node to an std_node, and overwrites the current std_node since there should only - be one mapping. - - Args: - ins_node: instructor node or str representing a function name - std_node: student node representing function - - Returns: - int: number of conflicts generated - - """ - if not isinstance(std_node, CaitNode): - raise TypeError - if isinstance(ins_node, str): - key = ins_node - else: - try: - if ins_node.ast_name == "FunctionDef": - key = ins_node.astNode.name - else: # TODO: Little skulpt artifact that doesn't raise Attribute Errors... - key = ins_node._id - raise AttributeError - except AttributeError: - key = ins_node.astNode._id - - try: - if std_node.ast_name == "FunctionDef": - value = AstSymbol(std_node.astNode.name, std_node) - else: # TODO: Little skulpt artifact that doesn't raise Attribute Errors... - raise AttributeError - # value = AstSymbol(std_node.astNode.name, std_node) - except AttributeError: - node = std_node - if type(node.astNode).__name__ != "Call": - node = node.parent - node._id = std_node._id - value = AstSymbol(std_node._id, node) - if key in self.func_table: - new_list = self.func_table[key] - if value not in new_list: - new_list.append(value) - if not (key in self.conflict_keys): - for other in new_list: - if value.id != other.id: - self.conflict_keys.append(key) - break - else: - new_list = AstSymbolList() - new_list.append(value) - - self.func_table[key] = new_list - return len(self.conflict_keys) - - def add_var_to_sym_table(self, ins_node, std_node): - """ - Adds ins_node._id to the symbol table if it doesn't already exist, mapping it to a set of ins_node. Updates a - second dictionary that maps ins_node to an std_node, and overwrites the current std_node since there should only - be one mapping. 
- - Args: - ins_node: instructor node or str representing variable - std_node: student node representing variable - - Returns: - int: number of conflicts generated - - """ - if not isinstance(std_node, CaitNode): - raise TypeError - if isinstance(ins_node, str): - key = ins_node - else: - key = ins_node.astNode._id - value = AstSymbol(std_node.astNode._id, std_node) - if key in self.symbol_table: - new_list = self.symbol_table[key] - new_list.append(value) - if not (key in self.conflict_keys): - for other in new_list: - if value._id != other._id: - self.conflict_keys.append(key) - break - else: - new_list = AstSymbolList() - new_list.append(value) - - self.symbol_table[key] = new_list - return len(self.conflict_keys) - - def add_exp_to_sym_table(self, ins_node, std_node): - """ - Adds mapping of expression symbol to student node - This function does NOT check for conflicts at the moment and probably should at some point. - TODO: Check for conflicts - Args: - ins_node: Instructor node representing an expression - std_node: student ast subtree corresponding to the symbol - - Returns: - None - """ - if not isinstance(std_node, CaitNode): - raise TypeError - self.exp_table[ins_node.astNode.id] = std_node - - def add_node_pairing(self, ins_node, std_node): - """ - Adds a mapping of instructor ast node to a specific student ast node - Args: - ins_node: instructor pattern ast node - std_node: student ast node - - Returns: - None - """ - if not isinstance(std_node, CaitNode): - raise TypeError - self.mappings[ins_node] = std_node - - def has_conflicts(self): - """ - - Returns: - bool: True if number of conflicts is greater than 0 - """ - return len(self.conflict_keys) > 0 - - def new_merged_map(self, other): - """ - Returns a newly merged map consisting of this and other - without modifying self. 
- Args: - other (AstMap): the other AstMap to be merged with - - Returns: - AstMap: self modified by adding the contents of other - """ - new_map = AstMap() - new_map.merge_map_with(self) - new_map.merge_map_with(other) - return new_map - - def merge_map_with(self, other): - """ - Returns a newly merged map consisting of this and other - by modifying self - Args: - other (AstMap): the other AstMap to be merged with - - Returns: - AstMap: self modified by adding the contents of other - """ - if other is None: - return - - if not isinstance(other, type(self)): - raise TypeError - - # merge all mappings - self.mappings.update(other.mappings) - - # merge all expressions - self.exp_table.update(other.exp_table) - - # merge all symbols - for key, value in other.symbol_table.items(): - for sub_value in value: - self.add_var_to_sym_table(key, sub_value.astNode) - - # merge all functions - for key, value in other.func_table.items(): - for sub_value in value: - self.add_func_to_sym_table(str(key), sub_value.astNode) - - @property - def match_lineno(self): - """ - - Returns: - int: the line number this match started on - """ - values = [v.lineno for v in self.mappings.values() - if v.lineno is not None] - if not values: - return -1 - else: - return min(values) - - def __getitem__(self, id_n): - if id_n.startswith('__'): - expression = self.exp_table[id_n] - expression.map = self - return expression - else: - if id_n in self.symbol_table: - return self.symbol_table[id_n] - else: - return self.func_table[id_n] - - def __contains__(self, id_n): - if id_n.startswith('__'): - return id_n in self.exp_table - else: - exists = id_n in self.symbol_table - if exists: - return exists - else: - return id_n in self.func_table diff --git a/src/lib/pedal/cait/cait_api.py b/src/lib/pedal/cait/cait_api.py deleted file mode 100644 index 8dff0a0308..0000000000 --- a/src/lib/pedal/cait/cait_api.py +++ /dev/null @@ -1,272 +0,0 @@ -from pedal.report import MAIN_REPORT -from pedal.cait.stretchy_tree_matching import StretchyTreeMatcher -from pedal.cait.cait_node import CaitNode -import ast - - -class CaitException(Exception): - pass - - -""" -CaitReport: - A collection of information from the latest CAIT run. - - Attrs: - ast: The CaitNode tree that was most recently parsed out. - cache[str:CaitNode]: A dictionary mapping student code (str) to - parsed representations. - success: Whether there have been any errors so far. - error: The exception that occurred, or None if no exception so far. -""" - - -def _parse_source(code, cait_report): - """ - Parses the given code and returns its Cait representation. If the parse was - unsuccessful, it attaches the error to the report. - - Args: - code (str): A string of Python code. - cait_report (dict): A Cait Report to store information in. - Returns: - AstNode: The parsed AST reprensetation, or None - """ - try: - parsed = ast.parse(code) - except SyntaxError as e: - cait_report['success'] = False - cait_report['error'] = e - return ast.parse("") - return parsed - - -def _load_cait(student_code, report): - """ - Retrieves the current report for CAIT. If there is no CAIT report, it will - generate one. If source code is given, that will be used instead of the - report's source code. - - Args: - student_code (str): The code to parse into the a CaitNode tree. If - None, then it will use the code in the report's Source tool. - report (Report): The report to attach data to. 
- - Returns: - dict: Returns the Cait Report - """ - if 'cait' not in report: - report['cait'] = {'success': True, 'error': None, - 'ast': None, 'cache': {}} - cait = report['cait'] - if student_code is not None: - if student_code in cait['cache']: - cait['ast'] = cait['cache'][student_code] - return cait - else: - student_ast = _parse_source(student_code, cait) - elif report['source']['success']: - student_code = report['source']['code'] - if student_code in cait['cache']: - cait['ast'] = cait['cache'][student_code] - return cait - else: - student_ast = report['source']['ast'] - else: - report.attach("Unparsable Source", tool='cait', - category='analyzer') - cait['success'] = False - cait['ast'] = CaitNode(ast.parse(""), report=report) - return cait - cait['ast'] = cait['cache'][student_code] = CaitNode(student_ast, report=report) - return cait - - -def require_tifa(self): - """ - Confirms that TIFA was run successfully, otherwise raises a - CaitException. - """ - if not self.report['tifa']['success']: - raise CaitException("TIFA was not run prior to CAIT.") - - -# noinspection PyBroadException -def parse_program(student_code=None, report=None): - """ - Parses student code and produces a CAIT representation. - - Args: - student_code (str): The student source code to parse. If None, defaults - to the code within the Source tool of the given Report. - report (Report): The report to attach data to. Defaults to MAIN_REPORT. - - Returns: - CaitNode: A CAIT-enhanced representation of the root Node. - """ - if report is None: - report = MAIN_REPORT - cait_report = _load_cait(student_code, report) - return cait_report['ast'] - - -def expire_cait_cache(report=None): - """ - Deletes the most recent CAIT run and any cached CAIT parses. - - Args: - report (Report): The report to attach data to. Defaults to MAIN_REPORT. - """ - if report is None: - report = MAIN_REPORT - report['cait']['ast'] = None - report['cait']['cache'] = {} - - -def def_use_error(node, report=None): - """ - Checks if node is a name and has a def_use_error - - Args: - node (str or AstNode or CaitNode): The Name node to look up. - report (Report): The report to attach data to. Defaults to MAIN_REPORT. - Returns: - True if the given name has a def_use_error - """ - if report is None: - report = MAIN_REPORT - if not isinstance(node, str) and node.ast_name != "Name": - raise TypeError - try: - def_use_vars = report['tifa']['issues']['Initialization Problem'] - except KeyError: - return False - if not isinstance(node, str): - node_id = node.id - else: - node_id = node - has_error = False - for issue in def_use_vars: - name = issue['name'] - if name == node_id: - has_error = True - break - return has_error - - -# noinspection PyBroadException -def data_state(node, report=None): - """ - Determines the Tifa State of the given node. - - Args: - node (str or AstNode or CaitNode): The Name node to look up in TIFA. - report (Report): The report to attach data to. Defaults to MAIN_REPORT. - Returns: - The State of the object (Tifa State) or None if it doesn't exist - """ - if report is None: - report = MAIN_REPORT - if not isinstance(node, str) and node.ast_name != "Name": - raise TypeError - if isinstance(node, str): - node_id = node - else: - node_id = node.id - try: - return report['tifa']["top_level_variables"][node_id] - except KeyError: - return None - - -def data_type(node, report=None): - """ - Looks up the type of the node using Tifa's analysis. - - Args: - node (str or AstNode or CaitNode): The Name node to look up in TIFA. 
- report (Report): The report to attach data to. Defaults to MAIN_REPORT. - Returns: - The type of the object (Tifa type) or None if a type doesn't exist - """ - state = data_state(node, report=report) - if state is not None: - return state.type - return None - - -def find_match(pattern, student_code=None, report=None, cut=False): - """ - Apply Tree Inclusion and return the first match of the `pattern` in the - `student_code`. - - Args: - pattern (str): The CaitExpression to match against. - student_code (str): The string of student code to check against. - Defaults to the code of the Source tool in the Report. - report (Report): The report to attach data to. - cut (bool): Set to true to trim root to first branch - Returns: - CaitNode or None: The first matching node for the given pattern, or - None if nothing was found. - """ - matches = find_matches(pattern=pattern, student_code=student_code, - report=report, cut=cut) - if matches: - return matches[0] - else: - return None - - -def find_matches(pattern, student_code=None, report=None, cut=False): - """ - Apply Tree Inclusion and return all matches of the `pattern` in the - `student_code`. - - Args: - pattern (str): The CaitExpression to match against. - student_code (str): The string of student code to check against. - report (Report): The report to attach data to. - cut (bool): Set to true to trim root to first branch - Returns: - List[CaitNode]: All matching nodes for the given pattern. - """ - if report is None: - report = MAIN_REPORT - cait_report = _load_cait(student_code, report) - if not cait_report['success']: - return [] - student_ast = cait_report['ast'] - matcher = StretchyTreeMatcher(pattern, report=report) - return matcher.find_matches(student_ast) - - -def find_submatches(pattern, student_code, is_mod=False): - """ - Incomplete. - """ - return find_expr_sub_matches(pattern, student_code, is_mod) - - -def find_expr_sub_matches(pattern, student_code, is_mod=False, report=None): - """ - Finds pattern in student_code - # TODO: Add code to make pattern accept CaitNodes - # TODO: Make this function without so much meta knowledge - Args: - pattern: the expression to find (str that MUST evaluate to a Module node with a single child or an AstNode) - student_code: student subtree - is_mod (bool): currently hack for multiline sub matches - report: defaults to MAIN_REPORT unless another one exists - Returns: - a list of matches or False if no matches found - """ - if report is None: - report = MAIN_REPORT - is_node = isinstance(pattern, CaitNode) - if not isinstance(pattern, str) and not is_node: - raise TypeError("pattern expected str or CaitNode, found {0}".format(type(pattern))) - matcher = StretchyTreeMatcher(pattern, report=report) - if (not is_node and not is_mod) and len(matcher.root_node.children) != 1: - raise ValueError("pattern does not evaluate to a singular statement") - return matcher.find_matches(student_code, check_meta=False) diff --git a/src/lib/pedal/cait/cait_node.py b/src/lib/pedal/cait/cait_node.py deleted file mode 100644 index e64509fb0d..0000000000 --- a/src/lib/pedal/cait/cait_node.py +++ /dev/null @@ -1,516 +0,0 @@ -import ast -from pedal.cait.ast_helpers import dump -from types import MethodType -from pedal.report import MAIN_REPORT - - -class CaitNode: - """ - A wrapper class for AST nodes. Linearizes access to the children of the ast - node and saves the field this AST node - originated from. 
- - Attributes: - ast_name (str): The name of the original AstNode (e.g., "Name" or - "FunctionDef") - - TODO: May want to just add fields and methods to the existing AST nodes and - use a production pattern instead. - """ - - def __init__(self, ast_node, my_field='', tid=0, lin_tree=None, - ancestor=None, report=None): - """ - - Args: - ast_node (ast_node): The AST node to be wrapped - my_field (str): the field of the parent node that produced this child. - tid (int): the tree id - lin_tree list of cait_node: A linear version of the tree - ancestor (cait_node): The parent of this node - report: The report associated with this particular match. - """ - if report is None: - report = MAIN_REPORT - self.report = report - self.children = [] - self.astNode = ast_node - self.field = my_field - self.tree_id = tid - self.parent = ancestor - if lin_tree is None: - self.linear_tree = [self] - else: - lin_tree.append(self) - self.linear_tree = lin_tree - - # reference to the easy node wrapping the ast_node - setattr(ast_node, 'cait_node', self) - - tid_count = tid - - my_field_generator = ast.iter_fields(self.astNode) - for item in my_field_generator: - field, value = item - # if the field doesn't have a value, no child exists - if value is None: - continue - - # If the children are not in an array, wrap it in an array for - # consistency in the code the follows - if not isinstance(value, list): - value = [value] - - # Reference ast_node_visitor.js for the original behavior and keep note of it for the purposes of handling - # the children noting the special case when the nodes of the array are actually parameters of the node - # (e.g. a load function) instead of a child node - for sub_value in value: - if isinstance(sub_value, ast.AST): - new_child = CaitNode(sub_value, my_field=field, - tid=tid_count + 1, - lin_tree=self.linear_tree, - ancestor=self, - report=self.report) - self.children.append(new_child) - tid_count = len(self.linear_tree) - 1 - - def __str__(self): - return ''.join([self.field, "\n", dump(self.astNode)]) - - def numeric_logic_check(self, mag, expr): - """ - If this node is a Compare or BoolOp node, sees if the logic in expr (a javascript string being a logical - statement) matches the logic of self. This assumes that we are only comparing numerical values to a single - variable - TODO: modify this to take multiple variables - TODO: modify to support more than +, -, *, and / BinOps - TODO: modify to support unary operators other than USub and Not - TODO: This is very finicky and buggy, try not to use it - Args: - mag (float): the order of magnitude that should be added to numbers to check logic, 1 is usually a good value, - especially when working with the set of integers. 
- expr (Compare or BoolOp): the "Compare" or "BoolOp" tree to check self against - - Returns: - bool: True if self (typically student node) and expr are equivalent boolean expressions - """ - - def eval_unop(unop_num, unop_node): - operand = eval_selector(unop_num, unop_node.operand) - op = unop_node.op_name - - return {"USub": -operand, - "Not": not operand}[op] - - def eval_binop(binop_num, binop_node): - left = eval_selector(binop_num, binop_node.left) - right = eval_selector(binop_num, binop_node.right) - op = binop_node.op_name - - return { - "Add": left + right, - "Sub": left - right, - "Mult": left * right, - "Div": left / right}[op] - - def eval_selector(op_num, op_expr): - op_expr = op_num if op_expr.ast_name == "Name" else op_expr - if isinstance(op_expr, (int, float)): - return op_expr - if op_expr.ast_name == "BinOp": - return eval_binop(op_num, op_expr) - if op_expr.ast_name == "UnaryOp": - return eval_unop(op_num, op_expr) - if op_expr.ast_name == "Num": - return op_expr.n - raise NotImplementedError - - def eval_bool_comp(num_list, comp_ast): - ops = comp_ast.ops_names - comps = comp_ast.comparators - results = [] - current = comp_ast.left - left = current - - for num_i in num_list: - result = True - for op, comp in zip(ops, comps): - current = eval_selector(num_i, current) - comp_p = eval_selector(num_i, comp) - - res = { - "Eq": current == comp_p, - "NotEq": current != comp_p, - "Lt": current < comp_p, - "LtE": current <= comp_p, - "Gt": current > comp_p, - "GtE": current >= comp_p, - }[op] - current = comp - result = result and res - if not result: - break - results.append(result) - current = left - return results - - def eval_boolop(num_list, boolop_ast): - boolop = boolop_ast.op_name - values = boolop_ast.values - results_c = None - is_and = boolop == "And" - for value in values: - if value.ast_name == "Compare": - results = eval_bool_comp(num_list, value) - else: # should be boolop - results = eval_boolop(num_list, value) - if results_c is None: - results_c = results - else: # compile results - new_result = [] - for result1, result2 in zip(results_c, results): - if is_and: - new_result.append(result1 and result2) - else: - new_result.append(result1 or result2) - results_c = new_result - return results_c - - try: - ins_expr = CaitNode(ast.parse(expr), report=self.report).body[0].value - ins_nums = ins_expr.find_all("Num") - std_nums = self.find_all("Num") - test_nums = [] - for num in ins_nums: - raw_num = num.n - test_nums.append(raw_num) - test_nums.append(raw_num + mag) - test_nums.append(raw_num - mag) - for num in std_nums: - raw_num = num.n - test_nums.append(raw_num) - test_nums.append(raw_num + mag) - test_nums.append(raw_num - mag) - - if self.ast_name == "Compare": - std_res = eval_bool_comp(test_nums, self) - elif self.ast_name == "BoolOp": - std_res = eval_boolop(test_nums, self) - else: - return False - - if ins_expr.ast_name == "Compare": - ins_res = eval_bool_comp(test_nums, ins_expr) - elif ins_expr.ast_name == "BoolOp": - ins_res = eval_boolop(test_nums, ins_expr) - else: - raise TypeError - return ins_res == std_res - except Exception: - return False - - def get_next_tree(self): - """Gets the next tree in the AST - This method gets the next AST node that is of equal or higher level than self. Returns None if the end of the - tree is reached - TODO: Create a get sibling method. 
- - Returns: - cait_node: The next tree in the AST - - """ - - # adding function to track tree ids - def visit_counter(self, node): - self.counter += 1 - self.generic_visit(node) - - node_counter = ast.NodeVisitor() - setattr(node_counter, 'counter', self.tree_id) - node_counter.visit = MethodType(visit_counter, node_counter) - - # getting ids - node_counter.visit(self.astNode) - out_of_tree = node_counter.counter >= len(self.linear_tree) # check if out of bounds - # len(self.children) > 0 and self.children[-1] == node_counter - if out_of_tree: - return None - return self.linear_tree[node_counter.counter] - - def get_child(self, node): - """ - - Args: - node: a non-CaitNode ast node - - Returns: - cait_node: the corresponding cait_node to the child - """ - if isinstance(node, ast.AST): - for child in self.children: - if child.astNode == node: - return child - elif isinstance(node, int): - return self.children(node) - return None - - @staticmethod - def get_ast_name(node): - return type(node).__name__ - - def get_clashing_attr(self, key): - if key == "value": - return self.get_value() - - def __getattr__(self, item): - key = item - """ - Non-ast node attributes based on ast_node attributes - """ - node_name = CaitNode.get_ast_name(self.astNode) - if node_name == "Assign" and key == "target": - key = "targets" - if item in AST_SINGLE_FUNCTIONS: - key = item[:-5] # strip suffix '_name' - if item in AST_ARRAYS_OF_FUNCTIONS: - key = item[:-6] # strip suffix '_names' - - """ - Non-ast node attributes - """ - if key == 'next_tree': - return self.get_next_tree() - if key == 'ast_name': - return node_name - elif key == '_name': - return self.astNode.name - elif key == 'ast_node': - return self.astNode - else: # ast node attributes or derivative attributes - if hasattr(self.astNode, key): - # noinspection PyBroadException - try: - field = self.astNode.__getattribute__(key) - except Exception: - field = None - if node_name == "Assign" and item != key: - if item == "target": - return field[0].cait_node # Get's the relevant ast node - elif item == "targets" and isinstance(field, list): - easy_array = [] - for node in field: - easy_array.append(node.cait_node) - return easy_array - else: - return field - elif item in AST_SINGLE_FUNCTIONS: - return type(field).__name__ - elif item in AST_ARRAYS_OF_FUNCTIONS: - str_ops_list = [] - for op in field: - str_ops_list.append(type(op).__name__) - return str_ops_list - elif isinstance(field, ast.AST): - return field.cait_node - elif isinstance(field, list): - try: - return [f.cait_node for f in field] - except AttributeError: - # This can only happen in NonLocals, which has a list - # of raw strings in the `names` property - return field - else: - return field - else: # get added field that may have existed for different node types - return self.get_clashing_attr(key) - - def find_matches(self, pattern, is_mod=False, check_meta=True, use_previous=True): - """ - Retrieves any patterns that match against this CaitNode. Expected to be - used for subpattern matching. - Args: - pattern (CaitNode/str): The pattern searched for in this cait_node - is_mod (bool): Assumes that this node is a module node - check_meta (bool): Checks if meta information of the nodes also matches (e.g. instead of just checking children, - also checking whether the children have the same field name in both the pattern and the - source - use_previous (bool): If True, the match will be searched while inheriting the symbol table of the parent. 
This - means that variable consistency must be maintained between parent and child matches. - - Returns: - :obj: 'list' of :'obj': AstMap: a list of matches - - """ - # Avoid circular import - import pedal.cait.stretchy_tree_matching as stm - is_node = isinstance(pattern, CaitNode) - if not isinstance(pattern, str) and not is_node: - raise TypeError("pattern expected str or CaitNode, found {0}".format(type(pattern))) - matcher = stm.StretchyTreeMatcher(pattern, report=self.report) - if (not is_node and not is_mod) and len(matcher.root_node.children) != 1: - raise ValueError("pattern does not evaluate to a singular statement") - prev_match = self.map if use_previous else None - return matcher.find_matches(self, check_meta=check_meta, pre_match=prev_match) - - def find_match(self, pattern, is_mod=False, check_meta=True, use_previous=True): - matches = self.find_matches(pattern, is_mod, check_meta=check_meta, use_previous=use_previous) - if len(matches) != 0: - return matches[0] - return None - - def find_all(self, node_type): - """Finds all nodes defined by string node_type - - Args: - node_type: the string representing the "type" of node to look for - - Returns: - a list of Ast Nodes (cait_nodes) of self that are of the specified type (including self if self - meets that criteria) - """ - items = [] - visitor = ast.NodeVisitor() - # setattr(visitor, "current_id", self.tree_id - 1) - setattr(visitor, "items", items) - func_name = 'visit_' + node_type - - def main_visit(self, node): - self.items.append(node.cait_node) - return self.generic_visit(node) - - func_ref = main_visit - setattr(visitor, func_name, MethodType(func_ref, visitor)) - visitor.visit(self.astNode) - return visitor.items - - def has(self, node): - """ - Determine if this node has the given `node`. - """ - if isinstance(node, (int, float)): - visitor = ast.NodeVisitor() - has_num = [] - - def visit_Num(self, potential): - has_num.append(node == potential.n) - return self.generic_visit(potential) - - visitor.visit_Num = MethodType(visit_Num, visitor) - visitor.visit(self.astNode) - return any(has_num) - elif node.ast_name != "Name": - return False - visitor = ast.NodeVisitor() - has_name = [] - - def visit_Name(self, potential): - has_name.append(node.id == potential.id) - return self.generic_visit(potential) - - visitor.visit_Name = MethodType(visit_Name, visitor) - visitor.visit(self.astNode) - return any(has_name) - - def is_before(self, other): - """ - Uses tree id to check if self node came before other. - Args: - other (cait_node): the other node to compare to - - Returns: - bool: True if self is before other - """ - try: - return self.tree_id < other.tree_id and self.linear_tree == other.linear_tree - except Exception: - raise TypeError - - def is_ast(self, ast_name): - """ - Checks self is the type of the specified ast node - Args: - ast_name (str): The name of the ast node type - - Returns: - bool: True if this node's ast name matches the specified one - """ - if not isinstance(ast_name, str): - ast_name = CaitNode.get_ast_name(ast_name.astNode) - return CaitNode.get_ast_name(self.astNode).lower() == ast_name.lower() - - def is_method(self): - """ - Checks if self is a method - - Returns: - bool: True if I'm a FunctionDef, and if any of my parents are ClassDef. - """ - # Check if I'm a FunctionDef, and if any of my parents are ClassDef. 
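
# --- Illustrative sketch (editorial aside; not part of the deleted module) ---
# The find_matches/find_match methods above are what drive the feedback checks
# later in this diff (e.g. instructor_append.py): capture a subtree with
# __expr__, then search only inside that capture. STUDENT is a hypothetical
# example input; the patterns follow the CAIT wildcard convention used below.
from pedal.cait.cait_api import find_matches

STUDENT = ("prices = [1, 2, 3]\n"
           "total = []\n"
           "for price in prices:\n"
           "    total.append(price)\n")

for match in find_matches("for ___ in ___:\n"
                          "    __expr__", student_code=STUDENT):
    body = match["__expr__"]                            # the captured loop body
    print(bool(body.find_matches("___.append(___)")))   # expected: True here
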
- if self.ast_name != "FunctionDef": - return False - current = self.parent - while current is not None: - if current.ast_name == "ClassDef": - return True - # Don't treat closures as methods - elif current.ast_name == "FunctionDef": - return False - current = current.parent - return False - - def get_data_state(self): - """ - Gets the data_state object of self - - Returns: - data_state or None: returns data_state if self is a name and exists, otherwise None - """ - if self.ast_name != "Name": - return None - try: - return self.report['tifa']["top_level_variables"][self.id] - except KeyError: - return None - - def get_data_type(self): - """ - - Returns: - type of the variable associated with this node if it's a name node, otherwise None. - """ - state = self.get_data_state() - if state is None: - return None - else: - return state.type - - def was_type(self, tp): - """ - - Returns: - type of the variable associated with this node if it's a name node, otherwise None. - """ - state = self.get_data_state() - if state is None: - return None - else: - return state.was_type(tp) - - def get_value(self): - """" - Returns: - Value of node if Num or Str, and get_data_state if Name - """ - value = None - if self.is_ast("Num"): - value = self.n - elif self.is_ast("Str"): - value = self.s - elif self.is_ast("Name"): - # TODO: Decide on what this should return... - value = self.id - return value - - -AST_SINGLE_FUNCTIONS = ["ctx_name", "op_name"] -AST_ARRAYS_OF_FUNCTIONS = ["ops_names"] diff --git a/src/lib/pedal/cait/stretchy_tree_matching.py b/src/lib/pedal/cait/stretchy_tree_matching.py deleted file mode 100644 index e18cae37af..0000000000 --- a/src/lib/pedal/cait/stretchy_tree_matching.py +++ /dev/null @@ -1,666 +0,0 @@ -import ast -import re -from pedal.cait.ast_map import AstMap -from pedal.cait.cait_node import CaitNode - -# "Enums" for _name_regex -_VAR = "var" -_EXP = "exp" -_WILD = "wild" -_NONE_FIELD = "none" - - -def is_primitive(item): - """ - Determines if the given item is a primitive value (either an int, float, - str, bool, or None). - - Args: - item (any): Any value - Returns: - bool: Whether the item is a primitive value. - """ - return isinstance(item, (int, float, str, bool)) or item is None - - -def _name_regex(name_id): - var_match = re.compile('^_[^_].*_$') # /regex - exp_match = re.compile('^__.*__$') # /regex - wild_card = re.compile('^___$') # /regex - return {_VAR: var_match.match(name_id), - _EXP: exp_match.match(name_id), - _WILD: wild_card.match(name_id)} - - -class StretchyTreeMatcher: - def __init__(self, ast_or_code, report, filename="__main__"): - """ - The StretchyTreeMatcher is used to compare a pattern against some - student code. It produces a set of potential mappings between them. - - Args: - ast_or_code (str or AstNode): The students' code or a valid AstNode from - `ast.parse`. If the code has invalid syntax, a SyntaxError - will be raised. - filename (str): The filename to parse with - only used for error - reporting. - report (Report): A report to obtain data from. 
- """ - self.report = report - if isinstance(ast_or_code, str): - ast_node = ast.parse(ast_or_code, filename) - else: - ast_node = ast_or_code - # Build up root - if ast_node is None: - self.root_node = None - elif isinstance(ast_node, CaitNode): - self.root_node = ast_node - else: - self.root_node = CaitNode(ast_node, _NONE_FIELD, report=self.report) - - def find_matches(self, ast_or_code, filename="__main__", check_meta=True, pre_match=None): - """ - Args: - ast_or_code (str or AstNode): The students' code or a valid AstNode from - `ast.parse`. If the code has invalid syntax, a SyntaxError - will be raised. - filename (str): The filename to parse with - only used for error - reporting. - check_meta (bool): Determine if the nodes came from the same AST - field. - Returns: - list[AstMap]: A list of AstMaps that are suitable matches. - """ - if isinstance(ast_or_code, str): - other_tree = CaitNode(ast.parse(ast_or_code, filename), report=self.report) - elif isinstance(ast_or_code, CaitNode): - other_tree = ast_or_code - else: - other_tree = CaitNode(ast_or_code, _NONE_FIELD, report=self.report) - explore_root = self.root_node - trim_set = ["Expr", "Module"] - explore_root_old_field = explore_root.field - if self.root_node is not None: # Trimming ins_node - while (len(explore_root.children) == 1 and - explore_root.ast_name in trim_set): - explore_root.field = explore_root_old_field - explore_root = explore_root.children[0] - explore_root_old_field = explore_root.field - explore_root.field = _NONE_FIELD - other_root = other_tree - other_root_old_field = other_root.field - if other_root is not None: # Trimming std_node - while len(other_root.children) == 1 and other_root.ast_name in trim_set: - other_root.field = other_root_old_field - other_root = other_root.children[0] - other_root_old_field = other_root.field - other_root.field = _NONE_FIELD - matches = self.any_node_match(explore_root, other_root, - check_meta=check_meta, pre_match=pre_match) - explore_root.field = explore_root_old_field - other_root.field = other_root_old_field - return matches - - def any_node_match(self, ins_node, std_node, check_meta=True, cut=False, pre_match=None): - """ - Finds whether ins_node can be matched to some node in the tree std_node - - Args: - ins_node: - std_node: - check_meta: - cut: - - Returns: - list of AstMaps: a mapping of nodes and a symbol table mapping ins_node to - some node in the tree std_node or False if such a matching does not - exist - """ - # @TODO: create a more public function that converts ins_node and std_node into CaitNodes - # TODO: Create exhaustive any_node_match - # matching: an object representing the mapping and the symbol table - matching = self.deep_find_match(ins_node, std_node, check_meta, pre_match=pre_match) - # if a direct matching is found - if matching: - for match in matching: - match.match_root = std_node - else: - matching = [] - # return matching # return it - # if not matching or exhaust: # otherwise - # try to matching ins_node to each child of std_node, recursively - for std_child in std_node.children: - matching_c = self.any_node_match(ins_node, std_child, check_meta=check_meta, cut=cut, pre_match=pre_match) - if matching_c: - for match in matching_c: - match.match_root = std_child - # return matching - matching = matching + matching_c - if len(matching) > 0: - return matching - return [] - - def deep_find_match(self, ins_node, std_node, check_meta=True, - pre_match=None): - """ - Finds whether ins_node and matches std_node and whether ins_node's children 
flexibly match std_node's children - in order - Args: - ins_node: The instructor ast that should be included in the student AST - std_node: The student AST that we are searching for the included tree - check_meta: Flag, if True, check whether the two nodes originated from the same ast field - pre_match: If this was part of a previous match... - - Returns: - a mapping of nodes and a symbol table mapping ins_node to std_node, or [] if no mapping was found - """ - method_name = "deep_find_match_" + type(ins_node.astNode).__name__ - target_func = getattr(self, method_name, self.deep_find_match_generic) - return target_func(ins_node, std_node, check_meta, pre_match=pre_match) - - # noinspection PyPep8Naming - def deep_find_match_Name(self, ins_node, std_node, check_meta=True, pre_match=None): - name_id = ins_node.astNode.id - match = _name_regex(name_id) - mapping = AstMap() - matched = False - meta_matched = self.metas_match(ins_node, std_node, check_meta) - if match[_VAR] and meta_matched: # if variable - if type(std_node.astNode).__name__ == "Name": - return self.deep_find_match_generic(ins_node, std_node, - check_meta=check_meta, ignores=["ctx"], pre_match=pre_match) - # could else return False, but shallow_match_generic should do this as well - elif match[_EXP]: # and meta_matched: # if expression - # terminate recursion, the whole subtree should match since expression nodes match to anything - mapping.merge_map_with(pre_match) - mapping.add_exp_to_sym_table(ins_node, std_node) - matched = True - elif match[_WILD] and meta_matched: # if wild card, don't care - # terminate the recursion, the whole subtree should match since wild cards match to anything - matched = True - - if matched: - mapping.add_node_pairing(ins_node, std_node) - return [mapping] - # else - return self.deep_find_match_generic(ins_node, std_node, - check_meta=check_meta, ignores=["ctx"], pre_match=pre_match) - - # noinspection PyPep8Naming - def deep_find_match_BinOp(self, ins_node, std_node, check_meta=True, pre_match=None): - op = ins_node.astNode.op - op = type(op).__name__ - is_generic = not (op == "Mult" or op == "Add") - if is_generic: - return self.deep_find_match_generic(ins_node, std_node, check_meta, pre_match=pre_match) - else: # this means that the node is clearly commutative - return self.deep_find_match_binflex(ins_node, std_node, False, pre_match=pre_match) - - # noinspection PyMethodMayBeStatic - def binflex_helper(self, case_left, case_right, new_mappings, base_mappings, pre_match=None): - """ - adds to new_mappings (return/modify by argument) the mappings for both the left and right subtrees as denoted by - case_left and case_right - Args: - case_left: The mappings for the left opperand - case_right: The mappings for the right opperand - new_mappings: The new set of mappings to generate - base_mappings: The original mappings of the binop node - pre_match: A mapping passed down from an initial match - Returns: - None - """ - if case_left and case_right: - for case_l in case_left: - new_map = base_mappings[0].new_merged_map(case_l).new_merged_map(pre_match) - for case_r in case_right: - both = new_map.new_merged_map(case_r) - if not both.has_conflicts(): - new_mappings.append(both) - - def deep_find_match_binflex(self, ins_node, std_node, check_meta=False, pre_match=None): - base_mappings = self.shallow_match(ins_node, std_node, check_meta) - if not base_mappings: - return [] - op_mappings = self.shallow_match(ins_node.children[1], std_node.children[1], check_meta=True) - if not op_mappings: - return [] - 
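
# --- Illustrative sketch (editorial aside; not part of the deleted module) ---
# deep_find_match_BinOp above routes Add and Mult through the commutative
# ("binflex") matcher, so operand order in the student code is expected not to
# matter. Hypothetical demo; the variable names are made up.
from pedal.cait.cait_api import find_matches

for student in ("total = price + tax", "total = tax + price"):
    print(bool(find_matches("total = _a_ + _b_", student_code=student)))
# expected: True for both orderings
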
base_mappings = [base_mappings[0].new_merged_map(op_mappings[0])] - - if base_mappings: - ins_left = ins_node.children[0] # instructor left ast node - ins_right = ins_node.children[2] # instructor right ast node - std_left = std_node.children[0] # student left ast node - std_right = std_node.children[2] # student right ast node - new_mappings = [] - # case 1: ins_left->std_left and ins_right->std_right - case_left = self.deep_find_match(ins_left, std_left, False) - case_right = self.deep_find_match(ins_right, std_right, False) - self.binflex_helper(case_left, case_right, new_mappings, base_mappings, pre_match=pre_match) - # case 2: ins_left->std_right and ins_right->std_left - case_left = self.deep_find_match(ins_left, std_right, False) - case_right = self.deep_find_match(ins_right, std_left, False) - self.binflex_helper(case_left, case_right, new_mappings, base_mappings, pre_match=pre_match) - if len(new_mappings) == 0: - return [] - return new_mappings - return [] - - def deep_find_match_Expr(self, ins_node, std_node, check_meta=True, pre_match=None): - """ - An Expression node (not to be confused with expressions denoted by the instructor nodes in Name ast nodes) - checks whether it should be generic, or not - Args: - ins_node: Instructor ast to find in the student ast - std_node: Student AST to search for the instructor ast in - check_meta: flag to check whether the fields of the instructor node and the student node should match - pre_match: An AstMap from a previous matching run - - Returns: - AstMap: a mapping between the instructor and student asts, or False if such a mapping doesn't exist - """ - # if check_meta and ins_node.field != std_node.field: - if not self.metas_match(ins_node, std_node, check_meta): - return [] - mapping = AstMap() if pre_match is None else pre_match - value = ins_node.value - ast_type = type(value.astNode).__name__ - if ast_type == "Name": - name_id = value.astNode.id - exp_match = re.compile('^__.*__$') # /regex - wild_card = re.compile('^___$') # /regex - matched = False - meta_matched = self.metas_match(ins_node, std_node, check_meta) - if exp_match.match(name_id): # and meta_matched: # if expression - # terminate recursion, the whole subtree should match since expression nodes match to anything - mapping.add_exp_to_sym_table(value, std_node) - matched = True - elif wild_card.match(name_id) and meta_matched: # if wild card, don't care - # terminate the recursion, the whole subtree should match since wild cards match to anything - matched = True - if matched: - mapping.add_node_pairing(ins_node, std_node) - return [mapping] - return self.deep_find_match_generic(ins_node, std_node, check_meta) - - def deep_find_match_generic(self, ins_node, std_node, check_meta=True, ignores=None, pre_match=None): - """ - This first uses shallow match to find a base map (match) from which to - build off. The algorithm then tracks all the possible mappings that - match a given child node in the instructor AST, keeping track of which - siblings have been visited. - - For each instructor child, when all children of the student node have - been iterated through recursively, a helper function is called. This - helper function determines which possible children validly can extend - the base match to create a set of new base maps through use of the - indicies of the sibilings. 
- - The process repeats itself until no matches can be grown or until each - instructor child node has been visited - - Args: - ins_node: Instructor ast to find in the student ast - std_node: Student AST to search for the instructor ast in - check_meta: flag to check whether the fields of the instructor node and the student node should match - ignores: List of fields to ignore in the field match - pre_match: a map from a previous match - - Returns: - a mapping between the isntructor and student asts, or [] if such a mapping doesn't exist - """ - if ignores is None: - ignores = [] - base_mappings = self.shallow_match(ins_node, std_node, check_meta) - if base_mappings: - for mapping in base_mappings: - mapping.merge_map_with(pre_match) - # base case this runs 0 times because no children - # find each child of ins_node that matches IN ORDER - base_sibs = [-1] - youngest_sib = 0 - # for each child - for i, insChild in enumerate(ins_node.children): - # make a new set of maps - running_maps = [] - running_sibs = [] - if insChild.field in ignores: - continue - # accumulate all potential matches for current child - for j, std_child in enumerate(std_node.children[youngest_sib:], youngest_sib): - std_child = std_node.children[j] - new_mapping = self.deep_find_match(insChild, std_child, check_meta) - if new_mapping: - running_maps.append(new_mapping) - running_sibs.append(j) - map_update = self.map_merge(base_mappings, base_sibs, running_maps, running_sibs) - if map_update is None: - return [] - base_mappings = map_update['new_maps'] - base_sibs = map_update['new_sibs'] - youngest_sib = map_update['youngest_sib'] + 1 - return base_mappings - return [] - - # noinspection PyMethodMayBeStatic - def map_merge(self, base_maps, base_sibs, run_maps, run_sibs): - """ - Merges base_maps with the current possible maps. Helper method to deep_find_match_generic. checks whether each - mapping in run_maps can extend the match to any possible mapping in base_maps. 
- - Args: - base_maps: The original mappings - base_sibs: The corresponding siblings for each mapping in base_maps - run_maps: The set of maps to merge into the current base_maps - run_sibs: The corresponding siblings for each mapping in run_maps - - Returns: - A new set of maps for all valid extensions of base_maps with running maps - """ - # no matching nodes were found - if len(run_maps) == 0: - return None - new_maps = [] - new_sibs = [] - youngest_sib = run_sibs[0] - for baseMap, base_sib in zip(base_maps, base_sibs): - for run_map, runSib in zip(run_maps, run_sibs): - if runSib > base_sib: - for run_mapsub in run_map: - new_map = baseMap.new_merged_map(run_mapsub) - if not new_map.has_conflicts(): # if it's a valid mapping - new_maps.append(new_map) - new_sibs.append(runSib) - if len(new_maps) == 0: - return None - return { - 'new_maps': new_maps, - 'new_sibs': new_sibs, - 'youngest_sib': youngest_sib - } - - # noinspection PyMethodMayBeStatic,PyPep8Naming,PyUnusedLocal - def shallow_match_Module(self, ins_node, std_node, check_meta=True): - """ - Flexibly matches a module node to a module or a body - Args: - ins_node: - std_node: - check_meta: - - Returns: - a mapping of ins_node to std_node, or False if doesn't match - """ - if type(std_node.astNode).__name__ == "Module" or std_node.field == "body": - mapping = AstMap() - mapping.add_node_pairing(ins_node, std_node) - return [mapping] - return [] - - def shallow_symbol_handler(self, ins_node, std_node, id_val, check_meta=True): - """ - TODO: Make this handle the func field to handle functions - Matches ins_node to std_node for different cases of encountering a name node in ins_node - case 1: _var_ matches if std_node is a name node and automatically returns a mapping and symbol table - case 2: __exp__ matches to any subtree and automatically returns a mapping and symbol table - case 3: ___ matches to any subtree and automatically returns a mapping - case 4: matches only if the exact names are the same (falls through to shallow_match_generic) - Args: - ins_node: - std_node: - id_val: - check_meta: - - Returns: - list of AstMap: a mapping of ins_node to std_node and possibly a symbol_table, or False if it doesn't match - """ - name_id = ins_node.astNode.__getattribute__(id_val) - match = _name_regex(name_id) - mapping = AstMap() - matched = False - # TODO: add functionality to add function references to func_table? - meta_matched = self.metas_match(ins_node, std_node, check_meta) - if match[_VAR] and meta_matched: # variable - if type(std_node.astNode).__name__ == "Name" or id_val in ["attr", "arg"]: - if id_val in ["attr", "arg"]: - std_node.astNode._id = std_node.astNode.__getattribute__(id_val) - if std_node.field == "func" and ins_node.field != _NONE_FIELD: - # TODO: This 'ins_node.field != _NONE_FIELD' code is for an obscure edge case where the - # instructor code is only _var_ - std_node.astNode._id = std_node.astNode.__getattribute__(id_val) - mapping.add_func_to_sym_table(ins_node, std_node) - else: - std_node.astNode._id = std_node.astNode.__getattribute__(id_val) - mapping.add_var_to_sym_table(ins_node, std_node) # TODO: Capture result? 
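
# --- Illustrative sketch (editorial aside; not part of the deleted module) ---
# The symbol handler above implements the three wildcard forms used throughout
# the checks in this diff:
#   _var_   binds a single name (and must bind consistently across the pattern),
#   __exp__ binds an arbitrary subtree, retrievable from the returned AstMap,
#   ___     matches anything without recording a binding.
# Hypothetical example; the student snippet is made up.
from pedal.cait.cait_api import find_match

match = find_match("_accum_ = _accum_ + __value__",
                   student_code="total = total + price")
if match:
    print(match["_accum_"])    # symbol(s) bound to _accum_ (the name 'total')
    print(match["__value__"])  # the subtree bound to __value__ ('price')
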
- matched = True - # could else return False, but shallow_match_generic should do this as well - elif match[_EXP] and meta_matched: - mapping.add_exp_to_sym_table(ins_node, std_node) - matched = True - elif match[_WILD] and meta_matched: - matched = True - - if matched: - mapping.add_node_pairing(ins_node, std_node) - return [mapping] - # else - return self.shallow_match_main(ins_node, std_node, check_meta=check_meta, ignores=["ctx"]) - - # noinspection PyPep8Naming,PyMethodMayBeStatic - def shallow_match_arg(self, ins_node, std_node, check_meta=True): - ins_node.astNode._id = ins_node.arg - # TODO: annotations are currently ignored because shallow_symbol_handler doesn't handle them, feature? or - # should we fix this. Although this should actually be toggleable? - return self.shallow_symbol_handler(ins_node, std_node, "arg", check_meta=check_meta) - - def shallow_match_arguments(self, ins_node, std_node, check_meta=True): - # TODO: do we ignore default values? Currently not ignored - return self.shallow_match_generic(ins_node, std_node, check_meta=check_meta) - - # noinspection PyPep8Naming,PyMethodMayBeStatic - def shallow_func_handle(self, ins_node, std_node, check_meta=True): - if ins_node.field == "func" and std_node.field == "func": - ins_node.astNode._id = ins_node.astNode.attr - return self.shallow_symbol_handler(ins_node, std_node, "attr", check_meta) - return self.shallow_match_generic(ins_node, std_node, check_meta) - - def shallow_match_Attribute(self, ins_node, std_node, check_meta=True): - if ins_node.field == "func" and std_node.ast_name == "Attribute": - return self.shallow_func_handle(ins_node, std_node, check_meta) - elif std_node.ast_name == "Attribute": - ins_node.astNode._id = ins_node.attr # TODO: Fix this hack more gracefully - # add_var_to_sym_table in ast_map needs the id attribute to make the map - return self.shallow_symbol_handler(ins_node, std_node, "attr", check_meta) - else: - return self.shallow_match_generic(ins_node, std_node, check_meta) - - # noinspection PyPep8Naming - def shallow_match_Name(self, ins_node, std_node, check_meta=True): - """ - TODO: Make this handle the func field to handle functions - Matches ins_node to std_node for different cases of encountering a name node in ins_node - case 1: _var_ matches if std_node is a name node and automatically returns a mapping and symbol table - case 2: __exp__ matches to any subtree and automatically returns a mapping and symbol table - case 3: ___ matches to any subtree and automatically returns a mapping - case 4: matches only if the exact names are the same (falls through to shallow_match_generic) - Args: - ins_node: - std_node: - check_meta: - - Returns: - list of AstMap: a mapping of ins_node to std_node and possibly a symbol_table, or False if it doesn't match - """ - ins_node.ast_node._id = ins_node.id - return self.shallow_symbol_handler(ins_node, std_node, "id", check_meta) - - # noinspection PyPep8Naming,PyMethodMayBeStatic - def shallow_match_Pass(self, ins_node, std_node, check_meta=True): - """ - An empty body should match to anything - Args: - ins_node: Instructor ast to find in the student ast - std_node: Student AST to search for the instructor ast in - check_meta: flag to check whether the fields of the instructor node and the student node should match - - Returns: - list of AstMap: a mapping between the isntructor and student asts, or False if such a mapping doesn't exist - """ - # if check_meta and ins_node.field != std_node.field: - if not self.metas_match(ins_node, std_node, 
check_meta): - return [] - mapping = AstMap() - mapping.add_node_pairing(ins_node, std_node) - return [mapping] - - # noinspection PyPep8Naming,PyMethodMayBeStatic - def shallow_match_Expr(self, ins_node, std_node, check_meta=True): - """ - An Expression node (not to be confused with expressions denoted by the instructor nodes in Name ast nodes) - should match to anything - Args: - ins_node: Instructor ast to find in the student ast - std_node: Instructor ast to find in the student ast - check_meta: flag to check whether the fields of the instructor node and the student node should match - - Returns: - a mapping between the instructor and student asts, or False if such a mapping doesn't exist - """ - # if check_meta and ins_node.field != std_node.field: - if not self.metas_match(ins_node, std_node, check_meta): - return [] - mapping = AstMap() - mapping.add_node_pairing(ins_node, std_node) - return [mapping] - - def shallow_match_Call(self, ins_node, std_node, check_meta=True): - return self.shallow_match_main(ins_node, std_node, check_meta, ignores=None) - # matches = self.shallow_match_main(ins_node, std_node, check_meta, ignores=["func"]) - # if matches: - # pass - # return None - # TODO: Make this handle Calls more intelligently - - # noinspection PyPep8Naming - def shallow_match_FunctionDef(self, ins_node, std_node, check_meta=True): - ins = ins_node.astNode - std = std_node.astNode - meta_matched = self.metas_match(ins_node, std_node, check_meta) - is_match = type(ins).__name__ == type(std).__name__ and meta_matched - mapping = self.shallow_match_main(ins_node, std_node, check_meta, ignores=['name', 'args']) - matched = False - if is_match and mapping: - name = ins.name - match = _name_regex(name) - if match[_VAR] and meta_matched: # variable - ins._id = name - std._id = std.name - mapping[0].add_func_to_sym_table(ins_node, std_node) # TODO: Capture result? 
- matched = True - elif match[_WILD] and meta_matched: - matched = True - elif name == std.name and meta_matched: - matched = True - if matched: - return mapping - else: - return [] - - # noinspection PyMethodMayBeStatic - def shallow_match_generic(self, ins_node, std_node, check_meta=True): - """ - Checks that all non astNode attributes are equal between ins_node and std_node - Args: - ins_node: Instructor ast root node - std_node: Student AST root node - check_meta: flag to check whether the fields of the instructor node and the student node should match - - Returns: - list of AstMap: a mapping between the instructor and student root nodes (potentially empty) - """ - return self.shallow_match_main(ins_node, std_node, check_meta=check_meta) - - def shallow_match_main(self, ins_node, std_node, check_meta=True, ignores=None): - """ - Checks that all non astNode attributes are equal between ins_node and std_node - Args: - ins_node: Instructor ast root node - std_node: Student AST root node - check_meta: flag to check whether the fields of the instructor node and the student node should match - ignores: a mapping between the instructor and student root nodes, or False if such a mapping doesn't exist - - Returns: - - """ - if ignores is None: - ignores = [] - ignores.append("_id") # special exception for symbols in lookup tables - ins = ins_node.astNode - std = std_node.astNode - ins_field_list = list(ast.iter_fields(ins)) - std_field_list = list(ast.iter_fields(std)) - meta_matched = self.metas_match(ins_node, std_node, check_meta) - is_match = len(ins_field_list) == len(std_field_list) and type(ins).__name__ == type( - std).__name__ and meta_matched - for insTup, stdTup in zip(ins_field_list, std_field_list): - if not is_match: - break - - ins_field = insTup[0] - ins_value = insTup[1] - std_field = stdTup[0] - std_value = stdTup[1] - - if ins_value is None: - continue - - ignore_field = ins_field in ignores - - is_match = (ins_field == std_field) or ignore_field - - if not isinstance(ins_value, list): - ins_value = [ins_value] - - if not isinstance(std_value, list): - std_value = [std_value] - - # is_match = len(ins_value) == len(std_value)# for stretchy matching this isn't True - # Reference ast_node_visitor.js for the original behavior and keep note of it for the purposes of handling - # the children noting the special case when the nodes of the array are actually parameters of the node - # (e.g. a load function) instead of a child node - if not ignore_field: - for inssub_value, stdsub_value in zip(ins_value, std_value): - if not is_match: - break - # TODO: make this a smarter comparison, maybe handle dictionaries, f-strings, tuples, etc. 
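
# --- Illustrative sketch (editorial aside; not part of the deleted module) ---
# Per shallow_match_FunctionDef above, the _var_ convention also applies to a
# function definition's name, which is recorded in the function symbol table.
# Hypothetical example; the retrieval below is inferred from that handler and
# from AstMap.__getitem__, not confirmed elsewhere in this diff.
from pedal.cait.cait_api import find_match

match = find_match("def _func_(___):\n"
                   "    pass",
                   student_code="def add_prices(cart):\n"
                                "    return sum(cart)")
if match:
    print(match["_func_"])  # expected: the binding for _func_ ('add_prices')
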
- if is_primitive(inssub_value): - is_match = inssub_value == stdsub_value - if is_match: - mapping = AstMap() # return MAPPING - mapping.add_node_pairing(ins_node, std_node) - return [mapping] - else: - return [] - - # filter function for various types of nodes - def shallow_match(self, ins_node, std_node, check_meta=True): - method_name = 'shallow_match_' + type(ins_node.astNode).__name__ - target_func = getattr(self, method_name, self.shallow_match_generic) - return target_func(ins_node, std_node, check_meta=check_meta) - - @staticmethod - def metas_match(ins_node, std_node, check_meta=True): - """ - Args: - ins_node: - std_node: - check_meta: - - Returns: - - """ - return ((check_meta and ins_node.field == std_node.field) or - not check_meta - # or std_node.field == _NONE_FIELD - or ins_node.field == _NONE_FIELD) diff --git a/src/lib/pedal/mistakes/__init__.py b/src/lib/pedal/mistakes/__init__.py deleted file mode 100644 index e69de29bb2..0000000000 diff --git a/src/lib/pedal/mistakes/instructor_append.py b/src/lib/pedal/mistakes/instructor_append.py deleted file mode 100644 index 21e0288e53..0000000000 --- a/src/lib/pedal/mistakes/instructor_append.py +++ /dev/null @@ -1,136 +0,0 @@ -from pedal.cait.cait_api import find_matches, find_expr_sub_matches, data_state -from pedal.report.imperative import gently_r, explain_r - - -def append_group_on_change(): - wrong_not_append_to_list() - - -def append_group(): - missing_append_in_iteration() - missing_append_list_initialization() - wrong_append_list_initialization() - wrong_not_append_to_list() - append_list_wrong_slot() - # TODO: add app_assign on next iteration of experiment! - # app_assign() - - -def find_append_in(node): - append_list = [] - calls = node.find_all("Call") - for node in calls: - if node.func.attr == "append": - append_list.append(node) - return append_list - - -""" -def missing_append_in_iteration(): - std_ast = parse_program() - for_loops = std_ast.find_all("For") - for loop in for_loops: - if len(find_append_in(loop)): - return False - explain("You must construct a list by appending values one at a time to the list.

    (app_in_iter)
    ") - return True -""" - - -def missing_append_in_iteration(): - message = "You must construct a list by appending values one at a time to the list." - code = "app_in_iter" - tldr = "For Loop Append Not Found" - matches = find_matches("for ___ in ___:\n" - " __expr__") - if matches: - for match in matches: - __expr__ = match["__expr__"] - submatch = __expr__.find_matches("___.append(___)") - if submatch: - return False - return explain_r(message, code, label=tldr) - return False - - -def wrong_not_append_to_list(): - message = ("Values can only be appended to a list. The variable {0!s} is either not initialized, " - "not initialized correctly, or is confused with another variable.") - code = "app_not_list" - tldr = "Not Appending to List" - matches = find_matches("for ___ in ___:\n" - " __expr__") - for match in matches: - __expr__ = match["__expr__"] - submatches = __expr__.find_matches("_target_.append(___)") - for submatch in submatches: - _target_ = submatch["_target_"] - if not data_state(_target_).was_type('list'): - return explain_r(message.format(_target_), code, label=tldr) - return False - - -def missing_append_list_initialization(): - message = "The list variable {0!s} must be initialized." - code = "no_app_list_init" - tldr = "List Not Initialized" - matches = find_matches("for ___ in ___:\n" - " __expr__") - for match in matches: - __expr__ = match["__expr__"] - submatches = __expr__.find_matches("_new_list_.append(___)", ) - for submatch in submatches: - _new_list_ = submatch["_new_list_"].astNode - # TODO: In theory revisit this by merging matches - matches02 = find_matches("{} = []\n" - "for ___ in ___:\n" - " __expr__".format(_new_list_.id)) - if not matches02: - return explain_r(message.format(_new_list_.id), code, label=tldr) - return False - - -def wrong_append_list_initialization(): - message = ("The list variable {0!s} is either not initialized " - "correctly or mistaken for another variable. " - "The list you append to should be initialized to an empty list.") - code = "app_list_init" - tldr = "Incorrect Initialization or Usage of Empty List" - matches = find_matches("_list_ = __expr1__\n" - "for ___ in ___:\n" - " __expr2__") - for match in matches: - _list_ = match["_list_"].astNode - __expr1__ = match["__expr1__"] - __expr2__ = match["__expr2__"] - submatch = __expr2__.find_matches("_list_.append(___)") - if submatch and (__expr1__.ast_name == "List" and - len(__expr1__.elts) != 0 or - __expr1__.ast_name != "List"): - return explain_r(message.format(_list_.id), code, label=tldr) - return False - - -def append_list_wrong_slot(): - message = "You should not append a list ({0!s}) to {1!s}." 
- code = "app_list_slot" - tldr = "Appending List Error" - matches = find_matches("_target_.append(_item_)") - if matches: - for match in matches: - _item_ = match["_item_"].astNode - _target_ = match["_target_"].astNode - if data_state(_item_).was_type('list'): - return explain_r(message.format(_item_.id, _target_.id), code, label=tldr) - return False - - -def app_assign(): - message = ("Appending modifies the list, so unlike addition," - " an assignment statement is not needed when using append.") - code = "app_asgn" - - matches = find_matches("_sum_ = _sum_.append(__exp__)") - if matches: - return explain_r(message, code) - return False diff --git a/src/lib/pedal/mistakes/instructor_filter.py b/src/lib/pedal/mistakes/instructor_filter.py deleted file mode 100644 index 7362ae24ef..0000000000 --- a/src/lib/pedal/mistakes/instructor_filter.py +++ /dev/null @@ -1,53 +0,0 @@ -from pedal.cait.cait_api import find_match, find_matches -from pedal.report.imperative import gently_r, explain_r - - -def filter_group(): - missing_if_in_for() - append_not_in_if() - - -def missing_if_in_for(): - """ - Name: missing_if_in_for - Pattern: - missing - for in ___ : - if ... ... : - - Feedback: The arrangement of decision and iteration is not correct for the filter pattern. - Returns: - - """ - message = ("The arrangement of decision and iteration is not correct for the filter pattern. " - "You need to evaluate the decision for each element of the list.") - code = "missing_if_in_for" - tldr = "Missing if In For" - matches = find_matches("for _item_ in ___:\n" - " if __expr__:\n" - " pass") - if not matches: - return explain_r(message, code, label=tldr) - return False - - -def append_not_in_if(): - """ - Name: append_not_in_if - Pattern: - missing - if ... : - ___.append(___) - - Feedback: Only items satisfying some condition should be appended to the list. - - Returns: - """ - message = "Only items satisfying some condition should be appended to the list." - code = "app_not_in_if" - tldr = "Append not in if" - match = find_match("if ___:\n" - " ___.append(___)") - if not match: - return explain_r(message, code, label=tldr) - return False diff --git a/src/lib/pedal/mistakes/instructor_histogram.py b/src/lib/pedal/mistakes/instructor_histogram.py deleted file mode 100644 index 7893bee929..0000000000 --- a/src/lib/pedal/mistakes/instructor_histogram.py +++ /dev/null @@ -1,124 +0,0 @@ -from pedal.cait.cait_api import find_match, find_matches, data_state -from pedal.report.imperative import gently_r, explain_r - - -def histogram_group(): - histogram_argument_not_list() - histogram_wrong_list() - histogram_missing() - plot_show_missing() - - -def histogram_missing(): - """ - Name: histogram_missing - Pattern: - - Missing - plt.hist(___) - - Feedback: The program should display a histogram. - - Returns: - """ - message = "The program should display a histogram." - code = "histo_missing" - tldr = "Missing Histogram" - match = find_match("plt.hist(___)") - if not match: - return explain_r(message, code, label=tldr) - return False - - -def plot_show_missing(): - """ - Name: plot_show_missing - Pattern: - Missing - plt.show() - - Feedback: The plot must be explicitly shown to appear in the Printer area. - - Returns: - """ - message = "The plot must be explicitly shown to appear in the Printer area." 
- code = "plot_show_missing" - tldr = "No Plot Shown" - match = find_match("plt.show()") - if not match: - return explain_r(message, code, label=tldr) - return False - - -def histogram_argument_not_list(): - """ - - Name: histogram_argument_not_list - Pattern: - plt.hist() - Where type() is not "list" - - Feedback: Making a histogram requires a list; is not a list. - - - Returns: - """ - message = "Making a histogram requires a list; {0!s} is not a list." - code = "hist_arg_not_list" - tldr = "Making Histogram from Non-list" - matches = find_matches("plt.hist(_argument_)") - if matches: - for match in matches: - _argument_ = match["_argument_"].astNode - if not _argument_.get_data_state() or not _argument_.get_data_state().was_type('list'): - return explain_r(message.format(_argument_.id), code, label=tldr) - return False - - -def histogram_wrong_list(): - """ - - Name: histogram_wrong_list - Pattern: - - for ___ in ___: - .append(___) - plt.hist() - - where name() != name() - - Feedback: The list created in the iteration is not the list being used to create the histogram. - - Returns: - """ - message = "The list created in the iteration is not the list being used to create the histogram." - code = "histo_wrong_list" - tldr = "Plotting Wrong List" - matches = find_matches("for ___ in ___:\n" - " __expr__\n" - "plt.hist(_list_)") - if matches: - for match in matches: - _list_ = match["_list_"].astNode - __expr__ = match["__expr__"] - submatches = __expr__.find_matches("_list_.append(___)") - if submatches: - return False - return explain_r(message, code, label=tldr) - return False - - -def histogram_wrong_placement(): - message = "The histogram should be plotted only once, after the new list has been created" - code = "histo_wrong_place" - tldr = "Histogram Plot Placed Incorrectly" - matches = find_matches("for ___ in ___:\n" - " pass\n") - if matches: - matches02 = find_matches("plt.hist(___)") - for match in matches: - if matches02: - for match02 in matches02: - if match02.match_lineno > match.match_lineno: - return False - return explain_r(message, code, label=tldr) diff --git a/src/lib/pedal/mistakes/instructor_iteration.py b/src/lib/pedal/mistakes/instructor_iteration.py deleted file mode 100644 index 4b1817eb57..0000000000 --- a/src/lib/pedal/mistakes/instructor_iteration.py +++ /dev/null @@ -1,153 +0,0 @@ -from pedal.cait.cait_api import (parse_program, find_match, find_matches, - find_expr_sub_matches, data_state, - def_use_error) -from pedal.report.imperative import gently_r, explain_r - - -def iteration_group(): - list_initialization_misplaced() - wrong_target_is_list() - wrong_list_repeated_in_for() - missing_iterator_initialization() - list_not_initialized_on_run() - wrong_iterator_not_list() - missing_target_slot_empty() - missing_for_slot_empty() - wrong_target_reassigned() - - -def iteration_group_on_change(): - wrong_target_is_list() - wrong_list_repeated_in_for() - wrong_iterator_not_list() - - -def all_for_loops(): - std_ast = parse_program() - return std_ast.find_all("For") - - -# this conflics with list_repeated_in_for -def wrong_target_is_list(): - message = ('The variable {0!s} is a list and ' - 'should not be placed in the iteration variable slot of the "for" block') - code = "target_is_list" - tldr = "Iteration Variable Overwriting List" - match = find_match("for _item_ in ___:\n pass") - if match: - _item_ = match["_item_"].astNode - if data_state(_item_).was_type('list'): - return explain_r(message.format(_item_.id), code, label=tldr) - return False - - -# this 
conflicts with list_in_wrong_slot_in_for -def wrong_list_repeated_in_for(): - message = 'The {0!s} variable can only appear once in the "for" block.' - code = "list_repeat" - tldr = "Duplicate Iteration Variable" - match = find_match("for _item_ in _item_:\n pass") - if match: - _item_ = match["_item_"].astNode - if data_state(_item_).was_type('list'): - return explain_r(message.format(_item_.id), code, label=tldr) - return False - - -# this isn't consistent with the pattern you wrote TODO: Fix this -def missing_iterator_initialization(): - message1 = "The slot to hold a list in the iteration is empty." - code1 = "no_iter_init-blank" - tldr1 = "Iteration Variable is Blank" - - message2 = "The variable {0!s} is in the list slot of the iteration but is not a list." - code2 = "no_iter_init" - tldr2 = "Iteration Variable is Not a List" - - match = find_match("for ___ in _list_:\n pass") - if match: - _list_ = match["_list_"].astNode - if _list_.id == "___": - return explain_r(message1, code1, label=tldr1) - elif not data_state(_list_).was_type('list'): - return explain_r(message2.format(_list_.id), code2, label=tldr2) - return False - - -# TODO: We need to cover the different cases for these -def wrong_iterator_not_list(): - message = ("The variable {0!s} has been set to something that is not a list but is placed " - "in the iteration block that must be a list.") - code = "iter_not_list" - tldr = "Iteration List is not list" - - match = find_match("for ___ in _item_:\n pass") - if match: - _item_ = match["_item_"].astNode - if not data_state(_item_).was_type('list'): - return explain_r(message.format(_item_.id), code, label=tldr) - return False - - -def missing_target_slot_empty(): - message = "You must fill in the empty slot in the iteration." - code = "target_empty" - tldr = "Missing Iteration Variable" - match = find_match("for _item_ in ___:\n pass") - if match: - _item_ = match["_item_"].astNode - if _item_.id == "___": - return explain_r(message, code, label=tldr) - return False - - -def list_not_initialized_on_run(): - message = "The list in your for loop has not been initialized." - code = "no_list_init" - tldr = "List Variable Uninitialized" - match = find_match("for ___ in _item_:\n pass") - if match: - _item_ = match["_item_"][0].astNode - if def_use_error(_item_): - return explain_r(message, code, label=tldr) - return False - - -def list_initialization_misplaced(): - message = "Initialization of {0!s} is a list but either in the wrong place or redefined" - code = "list_init_misplaced" - tldr = "Iterating over Non-list" - match = find_match("for ___ in _item_:\n pass") - if match: - _item_ = match["_item_"][0].astNode - if data_state(_item_).was_type('list') and def_use_error(_item_): - return explain_r(message.format(_item_.id), code, label=tldr) - return False - - -def missing_for_slot_empty(): - message = "You must fill in the empty slot in the iteration." - code = "for_incomplete" - tldr = "Iteration Incomplete" - match = find_match("for _item_ in _list_:\n pass") - if match: - _item_ = match["_item_"][0].astNode - _list_ = match["_list_"][0].astNode - if _item_.id == "___" or _list_.id == "___": - return explain_r(message, code, label=tldr) - return False - - -def wrong_target_reassigned(): - message = "The variable {0!s} has been reassigned. 
The iteration variable shouldn't be reassigned" - code = "target_reassign" - tldr = "Iteration Variable has been Reassigned" - matches = find_matches("for _item_ in ___:\n" - " __expr__") - for match in matches: - __expr__ = match["__expr__"] - _item_ = match["_item_"][0] - submatches = __expr__.find_matches("_item_ = ___") - if submatches: - return explain_r(message.format(_item_), code, label=tldr) - return False diff --git a/src/lib/pedal/mistakes/iteration_context.py b/src/lib/pedal/mistakes/iteration_context.py deleted file mode 100644 index 4df295854b..0000000000 --- a/src/lib/pedal/mistakes/iteration_context.py +++ /dev/null @@ -1,1194 +0,0 @@ -from pedal.cait.cait_api import (parse_program, - find_matches, find_match, - find_expr_sub_matches) -from pedal.report.imperative import explain, gently -import pedal.mistakes.instructor_append as append_api -from pedal.toolkit.utilities import * -from pedal.sandbox.compatibility import get_output, get_plots -from pedal.report.imperative import gently_r, explain_r - - -# ################8.2 Start####################### -def wrong_list_length_8_2(): - message = "You must have at least three pieces" - code = "list length_8.2" - tldr = "List too short" - matches = find_matches("_list_ = __expr__") - if matches: - for match in matches: - __expr__ = match["__expr__"] - if __expr__.ast_name == "List" and len(__expr__.elts) < 3: - return explain_r(message, code, label=tldr) - return False - - -def missing_list_initialization_8_2(): - message = ('You must set the variable shopping_cart' - 'to a list containing the prices of items in the shopping cart.') - code = "missing_list_init_8.2" - tldr = "Missing list initialization" - matches = find_matches("shopping_cart = __expr__") - for match in matches: - __expr__ = match["__expr__"] - if __expr__.ast_name == "List": - return False - return explain_r(message, code, label=tldr) - - -def wrong_list_is_constant_8_2(): - message = 'You must set shoppping_cart to a list of values not to a single number.' - code = "list_is_const_8.2" - tldr = "Shopping Cart not set to list" - matches = find_matches("shopping_cart = __expr__") - for match in matches: - __expr__ = match["__expr__"] - if __expr__.ast_name == "Num": - return explain_r(message, code, label=tldr) - return False - - -def list_all_zeros_8_2(): - message = 'Try seeing what happens when you change the numbers in the list.' 
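A hedged sketch (editor's addition, not in the deleted file) of the 8.2 cases these checks distinguish; the shopping_cart name comes from the patterns above, and the prices are invented for illustration.

shopping_cart = 24.95                  # flagged by list_is_const_8.2: a single number, not a list
shopping_cart = [0, 0]                 # flagged by list length_8.2: fewer than three items
shopping_cart = [0, 0, 0]              # flagged by default_list_8.2: every value is zero
shopping_cart = [24.95, 5.99, 12.50]   # the shape these checks accept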
- code = 'default_list_8.2' - tldr = 'Use different numbers' - matches = find_matches("_var_ = [__list__]") - for match in matches: - __list__ = match['__list__'] - list_node = __list__.parent - all_num = list_node.find_all("Num") - all_zeros = True - for num in all_num: - if num.n != 0: - all_zeros = False - break - if all_zeros: - return explain_r(message, code, label=tldr) - return False - - -# ################8.2 End####################### - - -# ################8.3 Start####################### -def wrong_list_initialization_placement_8_3(): - message = ('The list of episode lengths (episode_length_list)' - ' must be initialized before the iteration which uses this list.') - code = "init_place_8.3" - tldr = "Wrong Initialization Placement" - for_matches = find_matches("for ___ in ___:\n" - " pass") - init_matches = find_matches("episode_length_list = ___") - if init_matches and for_matches: - for for_match in for_matches: - for_lineno = for_match.match_lineno - for init_match in init_matches: - if init_match.match_lineno > for_lineno: - return explain_r(message, code, label=tldr) - return False - - -def wrong_accumulator_initialization_placement_8_3(): - message = ('The variable to hold the sum of the episode lengths (sum_length) ' - 'must be initialized before the iteration which uses this variable.') - code = "accu_init_place_8.3" - tldr = "Accumulator initialization misplaced" - for_matches = find_matches("for ___ in ___:" - " pass") - init_matches = find_matches("sum_length = 0") - if init_matches and for_matches: - for for_match in for_matches: - for_lineno = for_match.match_lineno - for init_match in init_matches: - if init_match.match_lineno > for_lineno: - return explain_r(message, code, label=tldr) - return False - - -def wrong_iteration_body_8_3(): - message = "The addition of each episode length to the total length is not in the correct place." - code = "iter_body_8.3" - tldr = "Accumulation Misplaced" - match = find_match("for _item_ in _list_:\n" - " sum_length = ___ + ___\n") - if not match: - return explain_r(message, code, label=tldr) - return False - - -def wrong_print_8_3(): - message = ('The output of the total length of time is not in the correct place. The total length of time should be' - ' output only once after the total length of time has been computed.') - code = "print_8.3" - tldr = "Print statement misplaced" - match = find_match("for _item_ in _list_:\n" - " pass\n" - "print(_total_)") - if not match: - return explain_r(message, code, label=tldr) - return False - - -# ################8.3 End####################### - - -# ################8.4 Start####################### -def missing_target_slot_empty_8_4(): - message = 'You must fill in the empty slot in the iteration.' - code = 'target_empty_8.4' - tldr = "Iteration Variable Empty" - matches = find_matches("for _item_ in pages_count_list:\n" - " pass") - if matches: - for match in matches: - _item_ = match["_item_"][0] - if _item_.id == "___": - return explain_r(message, code, tldr) - return False - - -def missing_addition_slot_empty_8_4(): - message = "You must fill in the empty slot in the addition." - code = "add_empty_8.4" - tldr = "Addition Blank" - matches = find_matches("sum_pages + _item_") - if matches: - for match in matches: - _item_ = match["_item_"][0] - if _item_.id == "___": - return explain_r(message, code, label=tldr) - return False - - -def wrong_names_not_agree_8_4(): - message = "Each value of {0!s} must be added to {1!s}." 
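A hedged sketch (editor's addition, not in the deleted file) of the 8.4 accumulation that missing_addition_slot_empty_8_4 and wrong_names_not_agree_8_4 compare against; pages_count_list and sum_pages come from the patterns above, the numbers are illustrative.

pages_count_list = [312, 120, 250]
sum_pages = 0
for pages_count in pages_count_list:
    sum_pages = sum_pages + pages_count   # name_agree_8.4 expects the loop variable here
print(sum_pages)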
- code = "name_agree_8.4" - tldr = "Iteration Variable and Accumulation Mismatch" - matches = find_matches("for _item1_ in pages_count_list:\n" - " sum_pages = sum_pages + _item2_") - if matches: - for match in matches: - # in theory, these will always be different? should test in test_cait - _item1_ = match["_item1_"][0] - _item2_ = match["_item2_"][0] - if _item1_.id != _item2_.id: - return explain_r(message.format(_item1_.id, _item2_.id), code, label=tldr) - return False - - -# ################8.4 End####################### -def wrong_modifying_list_8_5(): - """ - - # old code for record keeping because significantly different semantics - std_ast = parse_program() - list_init = std_ast.find_all('List') - true_sum = 0 - if len(list_init) != 0: - for value in list_init[0].elts: - true_sum = value.n + true_sum - if true_sum != sum([20473, 27630, 17849, 19032, 16378]) or len(list_init) == 0: - explain('Don\'t modify the list

    (mod_list_8.5)
    ') - return True - return False - - Returns: - """ - message = "Don't modify the list" - code = "mod_list_8.5" - match = find_match("[20473, 27630, 17849, 19032, 16378]") - if not match: - return explain_r(message, code) - return False - - -def wrong_modifying_list_8_6(): - """ - std_ast = parse_program() - list_init = std_ast.find_all('List') - true_sum = 0 - for value in list_init[0].elts: - true_sum = value.n + true_sum - if true_sum != sum([2.9, 1.5, 2.3, 6.1]): - explain('Don\'t modify the list

    (mod_list_8.6)
    ') - Returns: - """ - message = "Don't modify the list" - code = "mod_list_8.6" - match = find_match("_list_ = [2.9, 1.5, 2.3, 6.1]") - if not match: - return explain_r(message, code) - return False - - -def wrong_should_be_counting(): - """ - std_ast = parse_program() - for_loops = std_ast.find_all('For') - for loop in for_loops: - iter_prop = loop.target - assignments = loop.find_all('Assign') - for assignment in assignments: - binops = assignment.find_all('BinOp') - for binop in binops: - if binop.has(iter_prop) and binop.op == 'Add': - explain('This problem asks for the number of items in the list not the total of all the values in ' - 'the list.

    (not_count)
    ') - Returns: - """ - message = "This problem asks for the number of items in the list not the total of all the values in the list." - code = "not_count" - tldr = "Summing instead of counting" - matches = find_matches("for _item_ in ___:\n" - " __expr__") - if matches: - for match in matches: - _item_ = match["_item_"][0] - __expr__ = match["__expr__"] - submatches = __expr__.find_matches("___ = ___ + _item_") - if submatches: - return explain_r(message, code, label=tldr) - return False - - -def wrong_should_be_summing(): - """ - std_ast = parse_program() - for_loops = std_ast.find_all('For') - for loop in for_loops: - assignments = loop.find_all('Assign') - for assignment in assignments: - binops = assignment.find_all('BinOp') - for binop in binops: - if binop.has(1) and binop.op == 'Add': - explain('This problem asks for the total of all the values in the list not the number of items in ' - 'the list.

    (not_sum)
    ') - """ - message = "This problem asks for the total of all the values in the list not the number of items in the list." - code = "not_sum" - tldr = "Counting instead of summing" - matches = find_matches("for _item_ in ___:\n" - " __expr__") - if matches: - for match in matches: - __expr__ = match["__expr__"] - submatches = __expr__.find_matches("___ = 1 + ___", ) - if submatches: - return explain_r(message, code, label=tldr) - return False - - -def missing_addition_slot_empty(): - """ - std_ast = parse_program() - assignments = std_ast.find_all('Assign') - for assignment in assignments: - # left = assignment.target - right = assignment.value - binOp = right.find_all('BinOp') - if len(binOp) == 1: - binOp = binOp[0] - if binOp.op == 'Add': - if binOp.left.ast_name == 'Name' and binOp.right.ast_name == 'Name': - if binOp.left.id == '___' or binOp.right.id == '___': - explain('You must fill in the empty slot in the addition.

    (add_empty)
    ') - return True - return False - Returns: - """ - message = "You must fill in the empty slot in the addition." - code = "add_empty" - tldr = "Addition Blank" - matches = find_matches("___ + _item_") - if matches: - for match in matches: - _item_ = match["_item_"][0] - if _item_.id == "___": - return explain_r(message, code, tldr) - return False - - -def wrong_cannot_sum_list(): - """ - - std_ast = parse_program() - for_loops = std_ast.find_all('For') - for loop in for_loops: - list_prop = loop.iter - assignments = loop.find_all('Assign') - for assignment in assignments: - binops = assignment.find_all('BinOp') - for binop in binops: - if binop.has(list_prop) and binop.op == 'Add': - explain('Addition can only be done with a single value at a time, not with an entire list at one' - ' time.

    (sum_list)
    ') - Returns: - """ - message = 'Addition can only be done with a single value at a time, not with an entire list at one' - code = "sum_list" - tldr = "Cannot Sum a List" - matches = find_matches("for ___ in _list_ :\n" - " __expr__") - if matches: - for match in matches: - _list_ = match["_list_"][0] - __expr__ = match["__expr__"] - # submatches = __expr__.find_matches("___ = ___ + {}".format(_list_.id), ) - submatches = __expr__.find_matches("___ = ___ + _list_") - if submatches: - return explain_r(message, code, label=tldr) - return False - - -def missing_no_print(): - message = "Program does not output anything." - code = "no_print" - tldr = "Missing Output" - prints = find_match('print(___)', cut=True) - if not prints: - return explain_r(message, code, label=tldr) - return False - - -def missing_counting_list(): - """ - std_ast = parse_program() - has_count = False - for_loops = std_ast.find_all('For') - if len(for_loops) > 0: - for loop in for_loops: - assignments = loop.find_all('Assign') - if len(assignments) < 1: - continue - for assignment in assignments: - binops = assignment.find_all('BinOp') - if len(binops) < 1: - continue - lhs = assignment.target - for binop in binops: - if binop.has(lhs) and binop.has(1) and binop.op == 'Add': - has_count = True - if not has_count: - explain('Count the total number of items in the list using iteration.

    (miss_count_list)
    ') - Returns: - """ - message = 'Count the total number of items in the list using iteration.' - code = "miss_count_list" - tldr = "Missing Count in Iteration" - matches = find_matches("for _item_ in ___:\n" - " __expr__") - if matches: - for match in matches: - __expr__ = match["__expr__"] - submatches = __expr__.find_matches("_sum_ = _sum_ + 1", ) - if submatches: - return False - return explain_r(message, code, label=tldr) - - -def missing_summing_list(): - """ - std_ast = parse_program() - has_total = False - for_loops = std_ast.find_all('For') - if len(for_loops) > 0: - for loop in for_loops: - assignments = loop.find_all('Assign') - if len(assignments) < 1: - continue - iter_prop = loop.target - for assignment in assignments: - binops = assignment.find_all('BinOp') - if len(binops) < 1: - continue - lhs = assignment.target - for binop in binops: - if binop.has(lhs) and binop.has(iter_prop) and binop.op == 'Add': - has_total = True - if not has_total: - explain('Sum the total of all list elements using iteration.

    (miss_sum_list)
    ') - Returns: - """ - message = 'Sum the total of all list elements using iteration.' - code = "miss_sum_list" - tldr = "Missing Sum in Iteration" - matches = find_matches("for _item_ in ___:\n" - " __expr__") - if matches: - for match in matches: - _item_ = match["_item_"][0] - __expr__ = match["__expr__"] - submatches = __expr__.find_matches("_sum_ = _sum_ + _item_") - if submatches: - return False - return explain_r(message, code, label=tldr) - - -def missing_zero_initialization(): - """ - - std_ast = parse_program() - for_loops = std_ast.find_all('For') - accumulator = None - loop_acu = None - for loop in for_loops: - assignments = loop.find_all('Assign') - for assignment in assignments: - binops = assignment.find_all('BinOp') - if len(binops) > 0: - lhs = assignment.target - for binop in binops: - if binop.has(lhs) and binop.op == 'Add': - accumulator = lhs - loop_acu = loop - accu_init = False - if accumulator is not None: - assignments = std_ast.find_all('Assign') - for assignment in assignments: - if loop_acu.lineno > assignment.lineno: - lhs = assignment.target - if lhs.id == accumulator.id and assignment.has(0): - accu_init = True - break - if not accu_init and accumulator is not None: - explain('The addition on the first iteration step is not correct because either the variable ' - '{0!s} has not been initialized to an appropriate initial value or it has not been placed' - ' in an appropriate location

    (miss_zero_init)
    '.format(accumulator.id)) - return False - return True - Returns: - """ - - message = ('The addition on the first iteration step is not correct because either the variable {0!s} ' - 'has not been initialized to an appropriate initial value ' - 'or it has not been placed in an appropriate location') - code = "miss_zero_init" - tldr = "Missing Initialization for Accumulator" - matches01 = find_matches("for ___ in ___:\n" - " __expr__") - if matches01: - for match01 in matches01: - __expr__ = match01["__expr__"] - submatches01 = __expr__.find_matches("_sum_ = _sum_ + ___", ) - if submatches01: - for submatch01 in submatches01: - _sum_ = submatch01["_sum_"][0] - matches02 = find_matches(("{} = 0\n" - "for ___ in ___:\n" - " __expr__").format(_sum_.id)) - if not matches02: - return explain_r(message.format(_sum_.id), code, label=tldr) - return False - - -def wrong_printing_list(): - message = 'You should be printing a single value.' - code = "list_print" - tldr = "Printing in Iteration" - matches = find_matches("for ___ in ___:\n" - " __expr__") - if matches: - for match in matches: - __expr__ = match["__expr__"] - if __expr__.find_matches("print(___)", ): - return explain_r(message, code, label=tldr) - return False - - -# TODO: This might be reason to rethink letting instructor symbols map to multiple items -def missing_average(): - message = "An average value is not computed.<" - code = "no_avg" - tldr = "Missing Computation" - matches_missing = find_matches("for ___ in ___:\n" - " pass\n" - "__expr__") - matches = [] - if matches_missing: - for match in matches_missing: - __expr__ = match["__expr__"] - sub_matches = __expr__.find_matches("_total_/_count_", ) - if sub_matches: - for sub_match in sub_matches: - _total_ = sub_match["_total_"][0] - _count_ = sub_match["_count_"][0] - if _total_.id != _count_.id: - matches.append(match) - if not len(matches) > 0: - return explain_r(message, code, label=tldr) - return False - - -def warning_average_in_iteration(): - message = ('An average value is best computed after the properties name {0!s}(total) and ' - '{1!s} are completely known rather than recomputing the average on each iteration.') - code = "avg_in_iter" - tldr = "Redundant Average Calculation" - matches = find_matches("for ___ in ___:\n" - " __expr__\n") - if matches: - for match in matches: - __expr__ = match["__expr__"] - submatches = __expr__.find_matches("_average_ = _total_/_count_", ) - if submatches: - for submatch in submatches: - _total_ = submatch["_total_"][0] - _count_ = submatch["_count_"][0] - _average_ = submatch["_average_"][0] - if _total_.id != _count_.id != _average_.id and _total_.id != _average_.id: - return explain_r(message.format(_total_.id, _count_.id), code, label=tldr) - - return False - - -def wrong_average_denominator(): - message = "The average is not calculated correctly." 
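A hedged sketch (editor's addition, not in the deleted file) of the averaging shape the checks above and below reason about: total and count accumulate inside the loop, and the division happens exactly once afterwards. Names and data are illustrative.

measurements = [2.9, 1.5, 2.3, 6.1]
total = 0
count = 0
for measurement in measurements:
    total = total + measurement
    count = count + 1
average = total / count   # dividing inside the loop would trigger avg_in_iter instead
print(average)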
- code = "avg_denom" - tldr = "Incorrect Average Calculation" - matches = find_matches("for ___ in ___:\n" - " __expr__\n" # where expr contains _count_ = _count_ + 1 - "__expr2__") # where expr2 contains ___/_value_ - # where _value_.id != _count_.id - if matches: - for match in matches: - __expr__ = match["__expr__"] - __expr2__ = match["__expr2__"] - # _value_ = match["_value_"][0] - submatches = __expr__.find_matches("_count_ = _count_ + 1", ) - submatches02 = find_expr_sub_matches("___/_value_", __expr2__) - if submatches and submatches02: - for submatch in submatches: - for submatch02 in submatches02: - _count_ = submatch["_count_"][0] - _value_ = submatch02["_value_"][0] - if _count_.id != _value_.id: - return explain_r(message, code, label=tldr) - return False - - -def wrong_average_numerator(): - message = "The average is not calculated correctly." - code = "avg_numer" - tldr = "Incorrect Average Calculation" - matches = find_matches("for _item_ in ___:\n" - " __expr__\n" # where expr contains _total_ = _total_ + 1 - "__expr2__") # where expr2 contains _value_/___ - if matches: - for match in matches: - __expr__ = match["__expr__"] - __expr2__ = match["__expr2__"] - _item_ = match["_item_"][0] - # TODO: In theory, we could merge these matches to match variables... - submatches = __expr__.find_matches("_total_ = _total_ + _item_") - # submatches02 = find_expr_sub_matches("_value_/___", __expr2__) - submatches02 = __expr2__.find_matches("_value_/___") - if submatches and submatches02: - for submatch in submatches: - for submatch02 in submatches02: - _value_ = submatch02["_value_"][0] - _total_ = submatch["_total_"][0] - if _total_.id != _value_.id: - return explain_r(message, code, label=tldr) - return False - - -# #######################AVERAGE END########################### -def wrong_compare_list(): - message = "Each item in the list {0!s} must be compared one item at a time." - code = "comp_list" - tldr = "Not Comparing Each Item" - matches = find_matches("for ___ in _list_:\n" - " if __expr__:\n" - " pass") - if matches: - for match in matches: - _list_ = match["_list_"][0] - __expr__ = match["__expr__"] - if __expr__.has(_list_.astNode): - return explain_r(message.format(_list_.id), code, label=tldr) - return False - - -def wrong_for_inside_if(): - message = "The iteration should not be inside the decision block." - code = "for_in_if" - tldr = "For inside if" - match = find_match("if ___:\n" - " for ___ in ___:\n" - " pass") - if match: - return explain_r(message, code, label=tldr) - return False - - -def iterator_is_function(): - message = "You should make a variable for the list instead of using a function call for the list" - code = "iter_is_func" - tldr = "Using Function Call instead of List" - std_ast = parse_program() - for_loops = std_ast.find_all('For') - # noinspection PyBroadException - try: - for loop in for_loops: - list_prop = loop.iter - if list_prop.ast_name == 'Call': - return explain_r(message, code, label=tldr) - except Exception: - return False - return False - - -# ##########################9.1 START############################ -def wrong_list_initialization_9_1(): - message = "The list of rainfall amounts (rainfall_list) is not initialized properly." 
- code = "list_init_9.1" - tldr = "Incorrect List Initialization" - match = find_match('rainfall_list = weather.get("Data.Precipitation","Station.Location","Blacksburg, VA")') - if not match: - return explain_r(message, code, label=tldr) - return False - - -def wrong_accumulator_initialization_9_1(): - message = ("The variable to hold the total value of the rainfall amounts (rainfall_sum) " - "is not initialized properly.") - code = "accu_init_9.1" - tldr = "Incorrect Accumulation Variable initialization" - match = find_match("rainfall_sum = 0") - if not match: - return explain_r(message, code, label=tldr) - return False - - -def wrong_accumulation_9_1(): - message = "The addition of each rainfall amount to rainfall_sum is not correct." - code = "accu_9.1" - tldr = "Incorrect Accumulation Statement" - matches = find_matches("rainfall_sum = _item_ + rainfall") - if matches: - for match in matches: - _item_ = match["_item_"][0] - if _item_.id != "rainfall_sum": - return explain_r(message, code, label=tldr) - return False - - -def wrong_list_initialization_placement_9_1(): - message = ("The list of rainfall amount (rainfall_list) " - "must be initialized before the iteration that uses this list.") - code = "list_init_place_9.1" - tldr = "List initialization Misplaced or Missing" - match = find_match("rainfall_list = ___\n" - "for _item_ in _list_:\n" - " pass") - if not match: - return explain_r(message, code, label=tldr) - return False - - -def wrong_accumulator_initialization_placement_9_1(): - message = ("The variable for the sum of all the rainfall amounts (rainfall_sum) " - "must be initialized before the iteration which uses this variable.") - code = "accu_init_place_9.1" - tldr = "Accumulator Initialization Misplaced or missing" - matches = find_matches("rainfall_sum = ___\n" - "for _item_ in _list_:\n" - " pass") - if not matches: - return explain_r(message, code, label=tldr) - return False - - -def wrong_iteration_body_9_1(): - message = "The addition of each rainfall amount to the total rainfall is not in the correct place." - code = "iter_body_9.1" - tldr = "Accumulation Statement Misplaced or Missing" - matches = find_matches("for _item_ in _list_:\n" - " rainfall_sum = ___") - if not matches: - return explain_r(message, code, label=tldr) - return False - - -def wrong_print_9_1(): - """ - Returns: - """ - message = ('The output of the total rainfall amount is not in the correct place. The total rainfall should be ' - 'output only once after the total rainfall has been computed.') - code = "print_9.1" - tldr = "Print Statement Misplaced or Missing" - match = find_match("for _item_ in _list_:\n" - " pass\n" - "print(_total_)") - if not match: - return explain_r(message, code, label=tldr) - return False - - -# ##########################9.1 END############################ - - -# ##########################9.2 START############################ -def wrong_list_initialization_9_2(): - message = "The list of rainfall amounts (rainfall_list) is not initialized properly." 
- code = "list_init_9.2" - tldr = "Incorrect List Initialization" - matches = find_matches('rainfall_list = weather.get("Data.Precipitation","Station.Location","Blacksburg, VA")') - if not matches: - return explain_r(message, code, label=tldr) - return False - - -def wrong_accumulator_initialization_9_2(): - message = ("The variable to hold the total value of the rainfall amounts " - "(rainfall_count) is not initialized properly.") - code = "accu_init_9.2" - tldr = "Incorrect Initialization" - if not find_matches("rainfall_count = 0"): - return explain_r(message, code, label=tldr) - return False - - -def wrong_accumulation_9_2(): - message = ('The adding of another day with rainfall to the total ' - 'count of days with rainfall (rainfall_count) is not correct.') - code = "accu_9.2" - tldr = "Accumulation Statement Incorrect" - matches = find_matches("rainfall_count = _item_ + 1") - if matches: - for match in matches: - _item_ = match["_item_"][0] - if _item_.id != "rainfall_count": - return explain_r(message, code, label=tldr) - return False - - -def wrong_list_initialization_placement_9_2(): - message = ("The list of rainfall amount (rainfall_list) " - "must be initialized before the iteration that uses this list.") - code = "list_init_place_9.2" - tldr = "Incorrect List Initialization Placement" - matches = find_matches("rainfall_list = ___\n" - "for _item_ in _list_:\n" - " pass") - if not matches: - return explain_r(message, code, label=tldr) - return False - - -def wrong_accumulator_initialization_placement_9_2(): - message = ("The variable for the count of the number of days having rain (rainfall_count) " - "must be initialized before the iteration which uses this variable.") - code = "accu_init_place_9.2" - tldr = "Accumulator Initialization Misplaced" - matches = find_matches("rainfall_count = ___\n" - "for _item_ in _list_:\n" - " pass") - if not matches: - return explain_r(message, code, label=tldr) - return False - - -def wrong_iteration_body_9_2(): - message = ("The test (if) to determine if a given amount " - "of rainfall is greater than (>) zero is not in the correct place.") - code = "iter_body_9.2" - tldr = "If statement misplaced" - matches = find_matches("for _item_ in _list_:\n" - " if __expr__:\n" - " pass") - if matches: - for match in matches: - __expr__ = match["__expr__"] - if __expr__.numeric_logic_check(1, 'var > 0'): - return False - return explain_r(message, code, label=tldr) - - -def wrong_decision_body_9_2(): - message = ("The increase by 1 in the number of days having rainfall " - "(rainfall_count) is not in the correct place.") - code = "dec_body_9.2" - tldr = "Accumulation Statement Misplaced" - matches = find_matches("if __expr__:\n" - " rainfall_count = rainfall_count + 1") - if matches: - for match in matches: - __expr__ = match["__expr__"] - if __expr__.numeric_logic_check(1, 'var > 0'): - return False - return explain_r(message, code, label=tldr) - - -def wrong_print_9_2(): - message = ("The output of the total number of days with rainfall is not in the correct place. 
The total number of " - "days should be output only once after the total number of days has been computed.") - code = "print_9.2" - tldr = "Misplaced Print Statement" - match = find_match("for _item_ in _list_:\n" - " pass\n" - "print(_total_)") - if not match: - return explain_r(message, code, label=tldr) - return False - - -# ##########################9.2 END############################ - - -# ##########################9.6 START############################ -def wrong_comparison_9_6(): - message = "In this problem you should be finding temperatures above 80 degrees." - code = "comp_9.6" - tldr = "Incorrect Comparison Statement" - matches = find_matches("if __comp__:\n" - " pass") - if matches: - for match in matches: - __comp__ = match["__comp__"] - if not __comp__.numeric_logic_check(1, 'var > 80'): - return explain_r(message, code, label=tldr) - return False - - -# ##########################9.6 END############################ - - -# ##########################10.2 START############################ -def wrong_conversion_10_2(): - """ - '''missing - for _target_ in ____ : - _target_ * 0.4 - ''' - Returns: - """ - message = "The conversion of {0!s} to inches is either missing, incorrect, or misplaced." - code = "conv_10.2" - tldr = "Incorrect/Missing Conversion" - matches = find_matches("for _target_ in ___:\n" - " __expr__") - for match in matches: - # code version 1 start - _target_ = match["_target_"][0] - __expr__ = match["__expr__"] - matches02 = __expr__.find_matches("_target_*0.04".format(_target_.id)) - if matches02: - return False - return explain_r(message.format(_target_.id), code, label=tldr) - return False - - -# ##########################10.2 END############################ - - -# ##########################10.3 START############################ -def wrong_filter_condition_10_3(): - message = "The condition used to filter the year when artists died is not correct." 
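A hedged sketch (editor's addition, not in the deleted file) of a condition filt_10.3 accepts, assuming, as the accepted comparisons "var > 0" and "var != 0" suggest, that a value of 0 marks an artist without a recorded death year. Names and data are illustrative.

death_dates = [1926, 0, 1989, 0, 1954]
deceased_years = []
for year in death_dates:
    if year > 0:                  # "year != 0" would also satisfy the check
        deceased_years.append(year)
print(deceased_years)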
- code = "filt_10.3" - tldr = "Incorrect Condition" - matches = find_matches("if __expr__:\n" - " pass") - if matches: - for match in matches: - __expr__ = match["__expr__"] - if __expr__.numeric_logic_check(1, "var > 0") or __expr__.numeric_logic_check(1, "var != 0"): - return False - return explain_r(message, code, label=tldr) - return False - - -# ##########################10.3 END############################ - - -# ##########################10.4 START############################ -def wrong_and_filter_condition_10_4(): - message = ("The condition used to filter the temperatures " - "into the specified range of temperatures is not correct.") - code = "filt_and_10.4" - tldr = "Incorrect Condition Statement" - matches = find_matches("for _temp_ in _list_:\n" - " if __expr__:\n" - " pass") - if matches: - for match in matches: - _temp_ = match["_temp_"][0] - __expr__ = match["__expr__"] - if (__expr__.has(_temp_.astNode) and - not __expr__.numeric_logic_check(1, "32 <= temp <= 50")): - return explain_r(message, code, label=tldr) - return False - - -def wrong_nested_filter_condition_10_4(): - message = ("The decisions used to filter the temperatures into " - "the specified range of temperatures is not correct.") - code = "nest_filt_10.4" - tldr = "Incorrect Set of Decisions" - matches = find_matches("for _temp_ in _list_:\n" - " if __cond1__:\n" - " if __cond2__:\n" - " pass") - if matches: - for match in matches: - _temp_ = match["_temp_"][0].astNode - __cond1__ = match["__cond1__"] - __cond2__ = match["__cond2__"] - if not (__cond1__.has(_temp_) and __cond2__.has(_temp_) and ( - __cond1__.numeric_logic_check(1, "32 <= temp") and __cond2__.numeric_logic_check(1, "temp <= 50") or - __cond2__.numeric_logic_check(1, "32 <= temp") and - __cond1__.numeric_logic_check(1, "temp <= 50"))): - return explain_r(message, code, label=tldr) - return False - - -# ##########################10.4 END############################ - - -# ########################10.5 START############################### -def wrong_conversion_problem_10_5(): - message = "The conversion from kilometers to miles is not correct." - code = "conv_10.5" - tldr = "Incorrect Conversion" - matches = find_matches("for _item_ in ___:\n" - " __expr__") - if matches: - for match in matches: - _item_ = match["_item_"][0] - __expr__ = match["__expr__"] - matches02 = __expr__.find_matches("_item_*0.62") - if matches02: - return False - return explain_r(message, code, label=tldr) - return False - - -def wrong_filter_problem_atl1_10_5(): - """ - find pattern where expression is equal to _item_*0.62 and - where the condition is not equivalent to _expr_ > 10 - Returns: - """ - message = "You are not correctly filtering out values from the list." - code = "filt_alt1_10.5" - tldr = "Incorrect Filter Statement" - matches = find_matches("for _item_ in ___:\n" - " if __cond__:\n" - " _list_.append(__expr__)") - if matches: - for match in matches: - _item_ = match["_item_"][0].astNode - __cond__ = match["__cond__"] - __expr__ = match["__expr__"] - # matches02 = __expr__.find_matches("{0!s}*0.62".format(_item_.id)) - matches02 = __expr__.find_matches("_item_*0.62") - if matches02: - for match02 in matches02: - if (__cond__.has(_item_) and - not __cond__.numeric_logic_check(0.1, "item > 16.1290322580645")): - return explain_r(message, code, label=tldr) - return False - - -def wrong_filter_problem_atl2_10_5(): - message = "You are not correctly filtering out values from the list." 
- code = "filt_alt2_10.5" - tldr = "Incorrect Filter Statement" - matches = find_matches("for _item_ in ___:\n" - " _miles_ = __expr__\n" - " if __cond__:\n" - " _list_.append(_miles_)") - if matches: - for match in matches: - __expr__ = match["__expr__"] - __cond__ = match["__cond__"] - _item_ = match["_item_"][0].astNode - _miles_ = match["_miles_"][0].astNode - matches02 = __expr__.find_matches("_item_*0.62") - for _ in matches02: - if not (__cond__.has(_miles_) and - __cond__.numeric_logic_check(1, "_item_ > 10")): - return explain_r(message, code, label=tldr) - return False - - -def wrong_append_problem_atl1_10_5(): - message = "You are not appending the correct values.

    (app_alt1_10.5" - code = "app_alt1_10.5" - tldr = "Incorrect Value Appended" - matches = find_matches("for _item_ in ___:\n" - " if __cond__:\n" - " _list_.append(__expr__)") - if matches: - for match in matches: - _item_ = match["_item_"][0].astNode - __cond__ = match["__cond__"] - __expr__ = match["__expr__"] - if (__cond__.numeric_logic_check(0.1, "item > 16.1290322580645") and - __cond__.has(_item_)): - # new_code = "{}*0.62".format(_item_.id) - new_code = "_item_*0.62" - matches02 = __expr__.find_matches(new_code) - if not matches02: - return explain_r(message, code, label=tldr) - return False - - -def wrong_append_problem_atl2_10_5(): - message = "You are not appending the correct values." - code = "app_alt2_10.5" - tldr = "Incorrect Value Appended" - matches = find_matches("for _item_ in ___:\n" - " _miles_ = _item_ * 0.62\n" - " if __cond__:\n" - " _list_.append(_var_)") - for match in matches: - __cond__ = match["__cond__"] - _miles_ = match["_miles_"][0] - _var_ = match["_var_"][0] - if __cond__.has(_miles_) and __cond__.numeric_logic_check(1, "_miles_ > 10"): - if _var_.id != _miles_.id: - return explain_r(message, code, label=tldr) - return False - - -# ########################10.5 END############################### -def wrong_debug_10_6(): - """ - Should be on change feedback as opposed to on-run - Returns: - """ - message = "This is not one of the two changes needed. Undo the change and try again." - code = "debug_10.6" - tldr = "At least one unnecessary change" - matches = find_matches('quakes = earthquakes.get("location.depth","(None)","")\n' - 'quakes_in_miles = []\n' - 'for quake in _list1_:\n' - ' _list2_.append(quake * 0.62)\n' - 'plt.hist(quakes_in_miles)\n' - 'plt.xlabel("Depth in Miles")\n' - 'plt.ylabel("Number of Earthquakes")\n' - 'plt.title("Distribution of Depth in Miles of Earthquakes")\n' - 'plt.show()') - for match in matches: - name1 = match["_list1_"][0].ast_node.id - name2 = match["_list2_"][0].ast_node.id - master_list = ["quake", "quakes", "quakes_in_miles"] - if (name1 in master_list and name2 in master_list and - name1 != "quakes_in_miles" and name2 != "quakes" and - (name1 != "quake" or name2 != "quake")): - return False - return explain_r(message, code, label=tldr) - - -def wrong_debug_10_7(): - message = "This is not the change needed. Undo the change and try again." - code = "debug_10.7" - tldr = "At least one unnecessary change" - match = find_match("filtered_sentence_counts = []\n" - "book_sentence_counts = classics.get('metrics.statistics.sentences','(None)','')\n" - "for book in book_sentence_counts:\n" - " if book >= 5000:\n" - " filtered_sentence_counts.append(book)\n" - "plt.hist(filtered_sentence_counts)\n" - "plt.title('Distribution of Number of Sentences in Long Books')\n" - "plt.xlabel('Number of Sentences')\n" - "plt.ylabel('Number of Long Books')\n" - "plt.show()\n") - - if not match: - return explain_r(message, code, label=tldr) - return False - - -# ########################.....############################### -def wrong_initialization_in_iteration(): - message = ("You only need to initialize {0!s} once. 
" - "Remember that statements in an iteration block happens multiple times") - code = "wrong_init_in_iter" - tldr = "Initialization in Iteration" - matches = find_matches("for ___ in ___:\n" - " __expr__") - if matches: - for match in matches: - __expr__ = match["__expr__"] - submatches = __expr__.find_matches("_assign_ = __expr__", ) - if submatches: - for submatch in submatches: - __expr__sub = submatch["__expr__"] - _assign_ = submatch["_assign_"][0].astNode - if len(__expr__sub.find_all("Name")) == 0: - return explain_r(message.format(_assign_.id), code, label=tldr) - return False - - -def wrong_duplicate_var_in_add(): - message = "You are adding the same variable twice; you need two different variables in your addition." - code = "dup_var" - tldr = "Duplicate Division" - match = find_match("_item_ + _item_") - if match: - return explain_r(message, code, label=tldr) - return False - - -# ########################PLOTTING############################### -def plot_group_error(output=None, plots=None): - if output is None: - output = get_output() - if plots is None: - plots = get_plots() - if len(plots) > 1: - explain_r('You should only be plotting one thing!', "print_one", "Multiple Calls to plot") - return True - elif len(plots) == 0: - explain_r('The algorithm is plotting an empty list. Check your logic.', 'blank_plot', "Blank Plot") - return True - elif output: - explain('You should be plotting, not printing!', 'printing', "Printing instead of Plotting") - return True - elif len(plots[0]['data']) != 1: - explain('You should only be plotting one thing!', 'one_plot', "Too Many Plots") - return True - - -def all_labels_present(): # TODO: make sure it's before the show, maybe check for default values - """ - plt.title("Distribution of Number of Sentences in Long Books") - plt.xlabel("Number of Sentences") - plt.ylabel("Number of Long Books") - plt.show() - Returns: - """ - message = "Make sure you supply labels to all your axes and provide a title and then call show" - code = "labels_present" - tldr = "Missing Label(s)" - match = find_match("plt.title(___)\nplt.show()") - match02 = find_match("plt.xlabel(___)\nplt.show()") - match03 = find_match("plt.ylabel(___)\nplt.show()") - - if (not match) or (not match02) or (not match03): - return gently_r(message, code, label=tldr) - return False - - -def show_parens(): - message = "Make sure you add parenthesis to plt.show" - code = "show_parens" - tldr = "Incorrect Show" - if not find_match("plt.show"): - return gently_r() - return False - - -def hard_code_8_5(): # TODO: This one's weird - message = "Use iteration to calculate the sum." 
- code = "hard_code_8.5" - tldr = "Hard Coded Answer" - match = find_matches("print(__num__)") - if match: - for m in match: - __num__ = m["__num__"] - if len(__num__.find_all("Num")) > 0: - return explain_r(message, code, label=tldr) - return False diff --git a/src/lib/pedal/plugins/__init__.py b/src/lib/pedal/plugins/__init__.py deleted file mode 100644 index cc1889972d..0000000000 --- a/src/lib/pedal/plugins/__init__.py +++ /dev/null @@ -1,12 +0,0 @@ -''' -def default_pipeline(tifa=False, cait=True, sandbox=True): - next_section() - results = [] - if tifa: - results.append(tifa_analysis()) - if cait: - results.append(parse_program()) - if sandbox: - results.append(execute()) - return tuple(results) -''' diff --git a/src/lib/pedal/plugins/blockpy_compatibility.py b/src/lib/pedal/plugins/blockpy_compatibility.py deleted file mode 100644 index 660a3d84c6..0000000000 --- a/src/lib/pedal/plugins/blockpy_compatibility.py +++ /dev/null @@ -1,106 +0,0 @@ -class GracefulExit(Exception): - pass - - -class StudentData: - def __init__(self): - pass - - def get_names_by_type(self, type, exclude_builtins): - pass - - def get_values_by_type(self, type, exclude_builtins): - pass - - -student = StudentData() - - -def get_output(): - pass - - -def reset_output(): - pass - - -def queue_input(*inputs): - pass - - -def get_program(): - pass - - -def parse_program(): - pass - - -def had_execution_time_error(): - pass - - -def limit_execution_time(): - pass - - -def unlimit_execution_time(): - pass - - -def analyze_program(): - pass - - -def def_use_error(AstNode): - pass - - -class CorruptedAstNode: - def __init__(self): - pass - - -def find_match(instructor_code): - pass - - -def find_matches(instructor_code): - pass - - -class ASTMap: - def __init__(self, JSAstMap): - pass - - def get_std_name(self, id): - pass - - def get_std_exp(self, id): - pass - - -class AstNode: - def __init__(self, id): - pass - - def __eq__(self, other): - pass - - def numeric_logic_check(self, mag, expr): - pass - - def __str__(self): - pass - - def __repr__(self): - pass - - def __getattr__(self, key): - pass - - def has(self, AstNode): - pass - - def find_all(self, type): - pass diff --git a/src/lib/pedal/plugins/cmd_line.py b/src/lib/pedal/plugins/cmd_line.py deleted file mode 100644 index 65d7ef1a1e..0000000000 --- a/src/lib/pedal/plugins/cmd_line.py +++ /dev/null @@ -1,129 +0,0 @@ -from pedal.cait.cait_api import * -from pedal.report import MAIN_REPORT -from pedal.source import set_source -from pedal.tifa import tifa_analysis -from pedal.sandbox.compatibility import * -import importlib.util -import numpy as np -import pandas as pd - -import sys -import os -import re - - -def setup(student_code, input_vals): - """ - Clears MAIN_REPORT, sets source, and runs TIFA - Args: - student_code: String of student code - input_vals: list of inputs to be queued. 
- Returns: - None - """ - MAIN_REPORT.clear() - set_source(student_code) - tifa_analysis() - if len(input_vals) != 0: - queue_input(*input_vals) - run_student(True) - return get_sandbox() - - -def process(file, module, ins_code, report): - student_code1 = file.read() - setup(student_code1, inputs) # setup returns a sandbox object - module.loader.exec_module(ins_code) - feedback = report.feedback - return feedback - - -p2Flag = True -secrets = False -assignment_id = -1 -if __name__ == "__main__": - # processing args - feedback_code = sys.argv[1] - code_dir = sys.argv[2] - flag = sys.argv[3] - if flag == "-p2": - p2Flag = True - inputs = sys.argv[4:] - elif flag == "-secrets": - p2Flag = True - secrets = True - inputs = sys.argv[4:] - else: - inputs = sys.argv[3:] -else: - # feedback_suffix = "prequiz.py" - # assignment_id = 409 - feedback_suffix = "postquiz1.py" - assignment_id = 410 # Pass Count = 1 - # feedback_suffix = "postquiz2-1.py" - # assignment_id = 411 # Pass Count = 2 - # feedback_suffix = "postquiz2-2.py" - # assignment_id = 412 - # feedback_code = ("C:/Users/User/Documents/Luke_Stuff/Research/ComputationalThinking/DictionaryUnit/test_cmd/" - # "ins_script.py") - feedback_code = ("C:/Users/User/Documents/Luke_Stuff/Research/ComputationalThinking/" - "DictionaryUnit/ID/Assessments/") - feedback_code += feedback_suffix - - code_dir = ("C:/Users/User/Documents/Luke_Stuff/Research/ComputationalThinking/ResearchData/" - "ComputationalThinking/Tests/results/") - code_dir += "Spring2019/DictionaryData/cs1014_spr2019_log-v1/" - # code_dir += "Fall2018/DictionaryData/exported-f18/" - p2Flag = True - secrets = True - inputs = [] - -# Grabbing instructor feedback code -ins_mod = re.match("(?:.*/)(.*).py", feedback_code)[1] -my_spec = importlib.util.spec_from_file_location(ins_mod, feedback_code) -foo = importlib.util.module_from_spec(my_spec) - -# preparing to process - - -# Grabbing student files -if p2Flag: - student_feedback = [] - pass_count = 0 - main_table = "MainTable" - if secrets: - main_table += "-2" - main_table += ".csv" - df = pd.read_csv(code_dir + main_table) - code_states = code_dir + "CodeStates/" - for index, row in df.iterrows(): - scan = True - if assignment_id >= 0: - if secrets: - if int(row["AssignmentID"]) != assignment_id: - scan = False - if scan: - code_f = code_states + str(int(row['CodeStateID'])) + "/__main__.py" - # check assignment and find corresponding answer key in DictionaryUnit/ID/Assessments/... 
- with open(code_f) as code: - feedback_result = process(code, my_spec, foo, MAIN_REPORT) - # df.at[index, 'InterventionMessage'] = feedback_result - student_feedback.append(feedback_result) - score = 0.0 - if not feedback_result: - score = 1.0 - pass_count += 1 - df.at[index, 'Score'] = score - df.to_csv(code_dir + "processed.csv", index=False) -else: - student_feedback = [] - print(os.getcwd()) - student_files_base = os.listdir(code_dir) - student_files = [] - for code_name in student_files_base: - student_files.append(code_dir + code_name) - for code_name in student_files: - with open(code_name) as code_f: - student_feedback.append(process(code_f, my_spec, foo, MAIN_REPORT)) - if __name__ == "__main__": - print(student_feedback) diff --git a/src/lib/pedal/plugins/grade_magic.py b/src/lib/pedal/plugins/grade_magic.py deleted file mode 100644 index 695f45f72e..0000000000 --- a/src/lib/pedal/plugins/grade_magic.py +++ /dev/null @@ -1,389 +0,0 @@ -# Built-in imports -import json -import requests - -# IPython imports -from IPython.core.magic import (Magics, magics_class, line_magic, cell_magic, line_cell_magic) -from IPython.display import Javascript, display -from IPython.utils.io import capture_output, CapturedIO - -# Logging imports -import os -import sys -from warnings import warn -# from traitlets import Bool -import time - -# TODO: Opportunity here to add in requests-cache. This would allow us to avoid -# the repeated trip. However, you'll need to handle expiring the cache in a -# smart way. One option is to write a command line script to just wipe as -# necessary. Simply deleting the cache file would be pretty easy, assuming it -# installs per user. - -# This really should come in as a configuration setting somewhere. -BLOCKPY_URL = 'https://think.cs.vt.edu/blockpy/blockpy/load_assignment_give_feedback' - - -def get_response_error(response): - """ - Transform a Response object into a friendlier string. - - Args: - response (requests.Response): A Requests reponse object to parse for - some kind of error. - Returns: - str: A string representation of the URL response. - """ - return "{} {}: {}".format(response.status_code, response.reason, - response.text) - - -def download_on_run(assignment_id): - """ - Download the on_run (give_feedback) code to use to test their solution. - - Args: - assignment_id (int OR str): The ID of the assignment to get the - on_run code for. - Returns: - bool: Whether or not the request was successful. - str: If unsuccesful, a message to display to the user. Otherwise, it'll - be the on_run code. - """ - data = {'assignment_id': assignment_id} - try: - response = requests.get(BLOCKPY_URL, data=data) - except Exception as error: - return False, str(error) - try: - result = response.json() - except ValueError: - # Failed to parse the JSON; perhaps it was some text data? 
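A hedged usage sketch (editor's addition, not in the deleted file) of the (success, payload) contract download_on_run returns: True with the give_feedback code, or False with an error message. The assignment id is a placeholder.

successful, payload = download_on_run(410)   # placeholder assignment id
if successful:
    print("Fetched {} characters of give_feedback code".format(len(payload)))
else:
    print("Could not fetch the assignment: " + payload)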
- return False, get_response_error(response) - if result['success']: - return True, result['give_feedback'] - else: - return False, result['message'] - - -PEDAL_PIPELINE = ''' -from pedal.report import * -from pedal.report.imperative import * -clear_report() -from pedal.source import set_source -set_source({student_code}) -from pedal.tifa import tifa_analysis -tifa_analysis(True) -from pedal.sandbox.compatibility import * -queue_input({inputs}) -run_student(True) -student = get_sandbox() -from pedal.cait.cait_api import parse_program -{on_run} -from pedal.resolvers import simple -SUCCESS, SCORE, CATEGORY, LABEL, MESSAGE, DATA, HIDE = simple.resolve() -''' - - -def blockpy_grade(assignment_id, student_code, inputs): - """ - Helper function to capture the request from the server. - - Args: - assignment_id (int): The assignment ID to look up and use the on_run - code for. - student_code (str): The code that was written by the student. - - inputs (str): The inputs to queue into the assignment - - Returns: - str: The HTML formatted feedback for the student. - """ - successful_download, on_run = download_on_run(assignment_id) - # If it failed, let's display some information about why. - if not successful_download: - return on_run - return execute_on_run_code(on_run, student_code, inputs) - - -def execute_on_run_code(on_run, student_code, inputs): - """ - Actually execute the on_run code for the given student code. - """ - # Even though the student code is a string, we need to escape it to prevent - # any weirdness from being in the instructor code. - escaped_student_code = json.dumps(student_code) - instructor_code = PEDAL_PIPELINE.format(on_run=on_run, - student_code=escaped_student_code, - # inputs=','.join(inputs)) - inputs=inputs) - # Execute the instructor code in a new environment - global_variables = globals() - compiled_code = compile(instructor_code, 'instructor_code.py', 'exec') - exec(compiled_code, global_variables) - category = global_variables['CATEGORY'] - label = global_variables['LABEL'] - message = global_variables['MESSAGE'] - # In some cases, we might want to override how the text is rendered. - if category.lower() == 'instructor' and label.lower() == 'explain': - category = "Instructor Feedback" - label = '' - # Return the result as HTML - return '''{}: {}
    {}'''.format(category, label, message) - - -# The following string literals are used to create the JavaScript code that -# creates the Python code that will execute the instructor's feedback code -# using the student's Python code. - -# Extract out the student code, embed the result -EXTRACT_STUDENT_CODE = r""" -// Convert Notebook cells to a string of Python code -var makePython = function(cell) { - if (cell.cell_type == "code") { - // Code is embedded unchanged, unless it is magic - var source = cell.get_text(); - if (source.startsWith('%')) { - // Skip magic - return ''; - } else { - return source; - } - } else if (cell.cell_type == "markdown" || - cell.cell_type == "raw") { - // Markdown and text is wrapped in a string. - var escaped_text = cell.get_text().replace(/'''/g, "\\'\\'\\'"); - return "'''"+escaped_text+"'''"; - } -} -var isUsable = function(cell) { - return cell.cell_type == "code" || - cell.cell_type == "markdown" || - cell.cell_type == "raw"; -} -var cells = Jupyter.notebook.get_cells(); -var source_code = cells.filter(isUsable).map(makePython).join("\n"); -source_code = JSON.stringify(source_code); -console.log(source_code); -// Start constructing the feedback code (which will be Python). -var on_run_code = []; -on_run_code.push("student_code="+source_code); -""" - -# Retrieve the last cell, and also recolor it a little for style -ANIMATE_LAST_CELL = r""" -// While we are accessing the server, recolor the last cell a little. -var last = null; -if (cells.length > 0) { - last = cells[cells.length-1]; - $(last.element).animate({"background-color": "#E0E6FF"}, 1000); -} -""" - -# If the %grade magic is used, we run the code directly. -LOCAL_GRADE = r''' -on_run_code.push("from pedal.plugins.grade_magic import execute_on_run_code"); -on_run_code.push('print(execute_on_run_code({on_run_code}, student_code, {inputs}))'); -''' - -# If the %grade_blockpy magic is used, we need to get the on_run from blockpy. -BLOCKPY_GRADE = r''' -on_run_code.push("from pedal.plugins.grade_magic import blockpy_grade"); -on_run_code.push('import json') -on_run_code.push('inputs = {inputs}') -console.log('inputs = {inputs}') -on_run_code.push("print(blockpy_grade({assignment}, student_code, inputs))"); -''' - -# This chunk actually performs the on_run code execution using the kernel. -EXECUTE_CODE = r''' -on_run_code = on_run_code.join("\n"); -console.log(on_run_code); -var kernel = IPython.notebook.kernel; -if (kernel !== null) { - var t = kernel.execute(on_run_code, { 'iopub' : {'output' : function(x) { - if (x.msg_type == "error") { - // If this was an error, show the traceback properly. - if (last !== null) { - last.output_area.append_error(x.content); - console.error(x); - } else { - console.error("Could not append to final cell.", x); - } - } else if (!x.content.data && x.content.text) { - // If it was valid data, we show it as HTML. - console.log(x); - element.html(x.content.text.replace(/\n/g, "
    ")); - } else { - // I'm not sure what it is - better dump it on the console. - console.log(x); - } - // Decolor the last cell if it was there. - if (last !== null) { - last = cells[cells.length-1]; - $(last.element).animate({"background-color": "white"}, 1000); - } - }}}); -}''' - - -@magics_class -class GradeMagic(Magics): - """ - This class holds the magic for the %grade and %grade_blockpy - """ - - @line_magic - def grade_logstart(self, line=""): - # ######Logging - ts = time.time() - logger = self.shell.logger # logging - old_logfile = self.shell.logfile # logging - directory = os.path.expanduser("log_folder{}~/".format(line)) - logfname = os.path.expanduser("log_folder{}~/log_{}.py~".format(line, ts)) - self.shell.logfile = logfname - loghead = u'# IPython log file\n\n' - try: - os.makedirs(directory, exist_ok=True) - logger.logstart(logfname, loghead, 'rotate', True, True, - True) - except BaseException: - self.shell.logfile = old_logfile - warn("Couldn't start log: %s" % sys.exc_info()[1]) - self.shell.run_code("input = __builtins__.input") - self.shell.run_code("print = __builtins__.print") - self.shell.run_code("sum = __builtins__.sum") - self.shell.run_code("len = __builtins__.len") - - @line_magic - def grade_logstop(self, line=""): - self.shell.logger.logstop() - - def logging(self): - # ######Logging - ts = time.time() - logger = self.shell.logger # logging - old_logfile = self.shell.logfile # logging - logfname = os.path.expanduser("log_folder~/log_{}.py~".format(ts)) - self.shell.logfile = logfname - loghead = u'# IPython log file\n\n' - try: - logger.logstart(logfname, loghead, 'rotate', False, True, - True) - except BaseException: - self.shell.logfile = old_logfile - warn("Couldn't start log: %s" % sys.exc_info()[1]) - logger.timestamp = False - input_hist = self.shell.history_manager.input_hist_raw - logger.log_write(u'\n'.join(input_hist[1:])) - logger.log_write(u'\n') - logger.timestamp = True - self.shell.logger.logstop() - # ######Logging - - # noinspection PyMethodMayBeStatic - def grade_parser(self, line, cell=None): - if ',' in line: - if cell is None: - assignment, line = line.split(",", maxsplit=1) - else: - assignment = None - inputs = json.dumps(line.split(",")) - inputs = "\\'" + inputs[1:len(inputs) - 1] + "\\'" - else: - if cell is None: - assignment, inputs = line, "" - else: - inputs = line - assignment = "" - inputs = json.dumps(inputs) - return {"inputs": inputs, "assignment": assignment} - - # noinspection PyMethodMayBeStatic - def unified_helper(self, local_code, **kwargs): - code = EXTRACT_STUDENT_CODE - code += ANIMATE_LAST_CELL - code += local_code.format(**kwargs) - code += EXECUTE_CODE - return code - - @cell_magic - def grade(self, line="", cell=""): - dump = self.grade_parser(line, cell) - code = self.unified_helper(LOCAL_GRADE, on_run_code="INSTRUCTOR_CODE", inputs=dump['inputs']) - cell = cell.replace("\\", "\\\\") - cell = cell.replace("\n", "\\n") - cell = cell.replace("'", "\\'") - cell = cell.replace('"', '\\"') - # Runs this code in the kernel as python code - # Can also run compiled code - self.shell.run_code("INSTRUCTOR_CODE = " + '"' + cell + '"') - # TODO: This was the easier way for me to get this to work - # This might be worth using in more depth to have less translation - # to and from javascript. 
See usage_examples - return display(Javascript(code)) - - @line_cell_magic - def usage_examples(self, line="", cell="print('running cell')\nprint('running cell2')"): - # Runs code in the kernel's context - self.shell.run_code("print('fun')") - - # Runs code in kernel's context using compiled code - sample = compile(cell, "usage_examples.py", "exec") - self.shell.run_code(sample) - - # runs javascript code - self.shell.run_cell_magic("javascript", "", "console.log('I do JAVASCRIPT');\n") - # Maybe can use javascript execution to pass things around...not sure though...can't get it to work - # You can pass values, but it doesn't seem to work unless you run it again. - # https://michhar.github.io/javascript-and-python-have-a-party/ - - self.shell.run_cell_magic( - "javascript", "", - # js_code = Javascript( - """var callbacks = { iopub : { output: function(out_data){ console.log(out_data) } } };\n""" - """var code = "fun = 12";\n""" - """IPython.notebook.kernel.execute(code);\n""") - # handle = display(js_code, display_id="usage_examples") - # handle.update(handle) - self.shell.run_cell_magic("javascript", "", "console.log('I do JAVASCRIPT TOO!!');\n") - # captures standard output, standard error, etc. and stops or not stops it - # class IPython.utils.capture.capture_output(stdout=True, stderr=True, display=True) - # Note that Tracebacks aren't put in standard error? - with capture_output(True, False, False) as captured: - print(dir(self)) - self.shell.run_code("print(fun)") - sys.stderr.write("spam\n") - print("I captured stdout") - print(captured.stdout) - print("I captured stderr") - print(captured.stderr) - - @line_magic - def grade_blockpy(self, line=""): - dump = self.grade_parser(line) - code = self.unified_helper(BLOCKPY_GRADE, assignment=dump["assignment"], inputs=dump["inputs"]) - return display(Javascript(code)) - - -def load_ipython_extension(ipython): - """ - Register this plugin with Jupyter Notebooks. Although it is allegedly - necessary in order to make this a plugin, we do not actually use it. - """ - ipython.register_magics(GradeMagic) - - -""" -DEPRECATED: The following lines of code do not seem to be necessary to - register this plugin with Jupyter. -def _jupyter_server_extension_paths(): - return [{ - "module": "pedal.plugins.grade_magic" - }] - -# jupyter serverextension enable --py pedal.plugins.grade_magic -def load_jupyter_server_extension(nbapp): - from IPython import get_ipython - get_ipython().register_magics(GradeMagic) -""" diff --git a/src/lib/pedal/plugins/test_reference_solution.py b/src/lib/pedal/plugins/test_reference_solution.py deleted file mode 100644 index 1cbb7bd432..0000000000 --- a/src/lib/pedal/plugins/test_reference_solution.py +++ /dev/null @@ -1,142 +0,0 @@ -''' -Tool for running a Grading script through a series of student reference -solutions. 
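As a rough illustration of the layout this runner expects (file and function names here are hypothetical, inferred from add_all_tests and find_seed below): each reference solution is a .py file paired with a same-named .txt file holding the grader output to compare against, and it may assign __STUDENT_SEED__ so the runner can substitute a per-student value such as $_STUDENT_NAME into the grader's arguments.

# reference_solutions/correct_version.py -- hypothetical reference solution;
# its expected grader output would live in reference_solutions/correct_version.txt.
__STUDENT_SEED__ = "sample_student"  # read by find_seed() and substituted for $_STUDENT_NAME

def add_prices(prices):
    # A correct solution that the grading script should accept without complaint.
    return sum(prices)

print(add_prices([1.25, 2.50]))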
- -python -m pedal.plugins.test_reference_solution -''' - -# Runner -from pedal.report.imperative import clear_report, MAIN_REPORT -from pedal.cait import parse_program -import sys -import os -from io import StringIO -from contextlib import redirect_stdout -import unittest -from unittest.mock import patch, mock_open -import argparse - -# Arguments -DEFAULT_REFERENCE_SOLUTIONS_DIR = "reference_solutions/" - - -class TestReferenceSolutions(unittest.TestCase): - maxDiff = None - - -def substitute_args(arg, student_path, seed): - if arg == "$_STUDENT_MAIN": - return student_path - elif arg == "$_STUDENT_NAME": - return seed - return arg - - -def add_test(class_, name, python_file, - expected_output_path, expected_output, - grader_code, grader_path, grader_args, student_path): - seed = find_seed(python_file) - grader_args = [substitute_args(arg, student_path, seed) for arg in grader_args] - - def _inner_test(self): - captured_output = StringIO() - with redirect_stdout(captured_output): - # TODO: mock_open will only work if we are not anticipating - # the student or instructor to open files... - with patch('builtins.open', mock_open(read_data=python_file), - create=True): - with patch.object(sys, 'argv', grader_args): - clear_report() - grader_exec = compile(grader_code, grader_path, 'exec') - exec(grader_exec, globals()) - # print(repr(MAIN_REPORT.feedback[0].mistake['error'])) - actual_output = captured_output.getvalue() - if expected_output is None: - print("File not found:", expected_output_path) - with open(expected_output_path, 'w') as out: - out.write(actual_output) - print("\tCreated missing file with current output") - else: - self.assertEqual(actual_output, expected_output) - - setattr(class_, 'test_' + name, _inner_test) - - -def find_seed(python_code): - try: - ast = parse_program(python_code) - for assign in ast.find_all("Assign"): - if assign.targets[0].ast_name != "Name": - continue - if assign.targets[0].id == "__STUDENT_SEED__": - if assign.value.ast_name == "Str": - return assign.value.s - elif assign.value.ast_name == "Num": - return assign.value.n - elif assign.value.ast_name == "List": - return [e.n for e in assign.value.elts] - except SyntaxError: - return 0 - return 0 - - -# Load reference solutions -def add_all_tests(grader_path, reference_solutions_dir, grader_args, limit): - # Load grader file - with open(grader_path, 'r') as grader_file: - grader_code = grader_file.read() - for filename in os.listdir(reference_solutions_dir): - if limit is not None and limit != filename: - continue - path = os.path.join(reference_solutions_dir, filename) - if path.endswith(".py"): - text_path = path[:-2] + "txt" - with open(path, 'r') as python_file: - python = python_file.read() - if os.path.exists(text_path): - with open(text_path, 'r') as output_file: - output = output_file.read() - else: - output = None - add_test(TestReferenceSolutions, filename[:-3], python, - text_path, output, - grader_code, grader_path, grader_args, path) - - -def run_tests(): - unittest.main(argv=['first-arg-is-ignored']) - - -if __name__ == "__main__": - parser = argparse.ArgumentParser(description='Run instructor grading script on a collection of reference solutions') - parser.add_argument('grader', help='The path to the instructor grading script.') - parser.add_argument('--path', '-p', - help='The path to the student reference files. 
If not given, assumed to be in the same folder ' - 'as the instructor grading script.', - default=DEFAULT_REFERENCE_SOLUTIONS_DIR) - parser.add_argument('--args', '-a', - help='Pass in arguments that the grading script will use. ' - 'Variable substitutions include "$_STUDENT_MAIN".', - default='test_reference_solution.py,$_STUDENT_MAIN,$_STUDENT_NAME') - parser.add_argument('--limit', '-l', help='Limit to a specific file.', default=None) - args = parser.parse_args() - - # Turn the reference solutions path into an absolute filename - if os.path.isabs(args.path): - reference_solutions_path = args.path - else: - reference_solutions_path = os.path.join(os.path.dirname(args.grader), args.path) - - # If no reference solutions folder, let's make it - if not os.path.exists(reference_solutions_path): - os.mkdir(reference_solutions_path) - - # Fix up the passed in args - grader_args = args.args.split(",") - - # Check that we actually have some files to try out - if not os.listdir(reference_solutions_path): - print("No reference solutions found") - else: - add_all_tests(args.grader, reference_solutions_path, grader_args, args.limit) - run_tests() diff --git a/src/lib/pedal/plugins/vpl.py b/src/lib/pedal/plugins/vpl.py deleted file mode 100644 index d1b61a5cd6..0000000000 --- a/src/lib/pedal/plugins/vpl.py +++ /dev/null @@ -1,148 +0,0 @@ -from pedal.plugins.vpl_unittest import UnitTestedAssignment - -""" -Some kind of function to break up the sections -""" -import re -import sys -from html.parser import HTMLParser - -from pedal.report import MAIN_REPORT -from pedal import source -from pedal.resolvers import sectional -from pedal.cait.cait_api import expire_cait_cache - - -class VPLStyler(HTMLParser): - HEADERS = ("h1", "h2", "h3", "h4", "h5") - - def __init__(self): - super().__init__() - self.reset() - self.fed = [] - self.inside_pre = False - - def convert(self, html): - self.feed(html) - return self.get_data() - - @property - def text(self): - return ''.join(self.fed) - - def get_data(self): - return self.text - - def force_new_line(self): - if self.text and self.text[-1] not in ("\n", "\r"): - self.fed.append("\n") - - def handle_starttag(self, tag, attrs): - if tag in self.HEADERS: - self.force_new_line() - self.fed.append("-") - elif tag in ("pre",): - self.force_new_line() - self.fed.append(">") - self.inside_pre = True - - def handle_data(self, data): - if self.inside_pre: - # Need to prepend ">" to the start of new lines. 
- self.fed.append(data.replace("\n", "\n>")) - else: - self.fed.append(data) - - def handle_endtag(self, tag): - if tag in self.HEADERS: - self.fed.append("") - elif tag in ("pre",): - self.fed.append("") - self.inside_pre = False - - -def strip_tags(html): - return VPLStyler().convert(html) - - -def set_maximum_score(number, cap=True, report=None): - if report is None: - report = MAIN_REPORT - report['vpl']['score_maximum'] = number - report['vpl']['score_cap'] = cap - - -def resolve(report=None, custom_success_message=None): - if report is None: - report = MAIN_REPORT - print("<|--") - success, score, hc, messages_by_group = sectional.resolve(report) - last_group = 0 - for group, messages in sorted(messages_by_group.items()): - if group != last_group: - for intermediate_section in range(last_group, group, 2): - print("-" + report['source']['sections'][1 + intermediate_section]) - printed_first_bad = False - for message in messages: - if message['priority'] in ('positive', 'instructions'): - print(strip_tags(message['message'])) - elif not printed_first_bad: - print(strip_tags(message['message'])) - printed_first_bad = True - last_group = group - print("-Overall") - if success: - if custom_success_message is None: - print("Complete! Great job!") - else: - print(custom_success_message) - else: - print("Incomplete") - print("--|>") - print("Grade :=>>", round(score)) - - -class SectionalAssignment: - max_points = 1 - sections = None - - def __init__(self, filename=None, max_points=None, report=None): - self.report = MAIN_REPORT if report is None else report - find_file(filename if filename else self.filename, - sections=True, report=report) - set_maximum_score(self.max_points - if max_points is None else max_points) - source.check_section_exists(self.sections) - - def pre_test(self): - source.next_section() - verified = source.verify_section() - expire_cait_cache() - return verified - - def post_test(self): - return True - - def resolve(self): - checks = ((self.pre_test() and - getattr(self, attr)() and - self.post_test()) - for attr in dir(self) - if attr.startswith('test_') and - callable(getattr(self, attr))) - if all(checks): - self.report.set_success() - resolve(report=self.report) - - -from pedal.plugins.vpl_unittest import UnitTestedAssignment - - -def unittest_resolver(phases, report=None, custom_success_message=None): - success = True - for title, phase in phases: - outcome = phase()._run_all_tests() - if not outcome: - break - success = success and outcome - resolve(custom_success_message=custom_success_message) diff --git a/src/lib/pedal/plugins/vpl_safe_runner.py b/src/lib/pedal/plugins/vpl_safe_runner.py deleted file mode 100644 index 7bba8e6e44..0000000000 --- a/src/lib/pedal/plugins/vpl_safe_runner.py +++ /dev/null @@ -1,10 +0,0 @@ -from pedal import run -from pedal import set_source_file -import sys - -if __name__ == "__main__": - set_source_file(sys.argv[1] if len(sys.argv) > 1 else 'main.py') - student = run(context=False) - print(student.raw_output) - if student.exception: - print(student.exception_formatted, file=sys.stderr) diff --git a/src/lib/pedal/plugins/vpl_unittest.py b/src/lib/pedal/plugins/vpl_unittest.py deleted file mode 100644 index 09e4c08119..0000000000 --- a/src/lib/pedal/plugins/vpl_unittest.py +++ /dev/null @@ -1,112 +0,0 @@ -from unittest.util import safe_repr -from pedal import gently -from pedal.assertions.assertions import _normalize_string - - -class UnitTestedAssignment: - DELTA = .001 - - class AssertionException(Exception): - def __init__(self, 
message): - self.message = message - - def __init__(self): - pass - - def setUp(self): - pass - - def tearDown(self): - pass - - def _run_all_tests(self): - methods = [func for func in dir(self) - if callable(getattr(self, func)) and - func.startswith('test_')] - all_passed = True - for method in methods: - self.setUp() - try: - getattr(self, method)() - except UnitTestedAssignment.AssertionException as e: - gently(e.message) - all_passed = False - self.tearDown() - return all_passed - - def assertSimilarStrings(self, first, second, msg): - if _normalize_string(first) != _normalize_string(second): - return self.assertEqual(first, second, msg, exact=True) - - def assertNotSimilarStrings(self, first, second, msg): - if _normalize_string(first) == _normalize_string(second): - return self.assertEqual(first, second, msg, exact=True) - - def assertLessEqual(self, val1, val2, msg=None): - if not (val1 <= val2): - self.fail(msg, "{} is not less than or equal to {}".format(safe_repr(val1), safe_repr(val2))) - - def assertGreaterEqual(self, val1, val2, msg=None): - if not (val1 >= val2): - self.fail(msg, "{} is not greater than or equal to {}".format(safe_repr(val1), safe_repr(val2))) - - def assertNotEqual(self, val1, val2, msg=None, exact=False): - if val1 != val2: - return - if not exact and isinstance(val1, str) and isinstance(val2, str): - self.assertNotSimilarStrings(val1, val2, msg) - elif (not exact and isinstance(val1, (int, float)) and - isinstance(val2, (int, float))): - if abs(val2 - val1) > UnitTestedAssignment.DELTA: - return - standardMsg = "{} == {}".format(safe_repr(val1), safe_repr(val2)) - self.fail(msg, standardMsg) - - def assertEqual(self, val1, val2, msg=None, exact=False): - if val1 == val2: - return - if not exact and isinstance(val1, str) and isinstance(val2, str): - self.assertSimilarStrings(val1, val2, msg) - elif (not exact and isinstance(val1, (int, float)) and - isinstance(val2, (int, float))): - if abs(val2 - val1) <= UnitTestedAssignment.DELTA: - return - standardMsg = "{} != {}".format(safe_repr(val1), safe_repr(val2)) - self.fail(msg, standardMsg) - - def assertIn(self, member, container, msg=None): - if member not in container: - standardMsg = "{} not found in {}".format(safe_repr(member), - safe_repr(container)) - self.fail(msg, standardMsg) - - def assertNotIn(self, member, container, msg=None): - if member in container: - standardMsg = "{} found in {}".format(safe_repr(member), - safe_repr(container)) - self.fail(msg, standardMsg) - - def assertTrue(self, value, msg=None): - if not value: - self.fail(msg, "{} is not true".format(value)) - - def assertFalse(self, value, msg=None): - if value: - self.fail(msg, "{} is not false".format(value)) - - def assertSandbox(self, sandbox, msg=None): - if sandbox.exception is not None: - self.fail(msg, sandbox.format_exception()) - - def assertIsInstance(self, value, parent, msg=None): - if not isinstance(value, parent): - self.fail(msg, "{} is not an instance of {}".format(safe_repr(value), safe_repr(parent))) - - def assertHasAttr(self, object, attr, msg=None): - if not hasattr(object, attr): - self.fail(msg, "{} does not have an attribute named {}".format(safe_repr(object), safe_repr(attr))) - - def fail(self, message, standardMsg): - if message is None: - message = standardMsg - raise UnitTestedAssignment.AssertionException(message) diff --git a/src/lib/pedal/questions/__init__.py b/src/lib/pedal/questions/__init__.py deleted file mode 100644 index ce0380282b..0000000000 --- a/src/lib/pedal/questions/__init__.py +++ 
/dev/null @@ -1,123 +0,0 @@ -""" -A tool for providing dynamic questions to learners. -""" - -NAME = 'Questions' -SHORT_DESCRIPTION = "Provides dynamic questions to learners" -DESCRIPTION = ''' -''' -REQUIRES = [] -OPTIONALS = [] -CATEGORY = 'Instructions' - -__all__ = ['NAME', 'DESCRIPTION', 'SHORT_DESCRIPTION', 'REQUIRES', 'OPTIONALS', - 'Question', 'Pool', 'set_seed'] - -from pedal.report.imperative import MAIN_REPORT, gently -from pedal.questions.setup import _setup_questions, set_seed, _name_hash -from pedal.questions.loader import load_question, SETTING_SHOW_CASE_DETAILS - - -class QuestionGrader: - def _get_functions_with_filter(self, filter='grade_'): - return [getattr(self, method_name) for method_name in dir(self) - if method_name.startswith(filter) and - callable(getattr(self, method_name))] - - def _test(self, question): - methods = self._get_functions_with_filter() - for method in methods: - method(question) - - -class Question: - def __init__(self, name, instructions, tests, seed=None, report=None): - self.name = name - self.instructions = instructions - self.tests = tests - self.seed = seed - if report is None: - report = MAIN_REPORT - self.report = report - self.answered = False - - def answer(self): - self.answered = True - - def ask(self): - if isinstance(self.tests, QuestionGrader): - self.tests._test(self) - else: - for test in self.tests: - test(self) - if not self.answered: - show_question(self.instructions, self.report) - - -def show_question(instructions, report=None): - if report is None: - report = MAIN_REPORT - report.attach('Question', category='Instructions', tool='Questions', - group=report.group, priority='instructions', hint=instructions) - - -class Pool: - _POOL_TRACKER = 0 - - def __init__(self, name, choices, seed=None, report=None, position=None): - self.name = name - self.choices = choices - self.seed = seed - if report is None: - report = MAIN_REPORT - self.report = report - if position is None: - position = Pool._POOL_TRACKER - Pool._POOL_TRACKER += 1 - self.position = position - - def choose(self, force=None): - _setup_questions(self.report) - if force is None: - if self.seed is None: - force = self.report['questions']['seed'] - if isinstance(force, str): - force = _name_hash(force + self.name) - # Assume iterable; could be check that throws better error - if not isinstance(force, int): - force = force[self.position] - else: - force = self.seed - return self.choices[force % len(self.choices)] - - @property - def answered(self): - for choice in self.choices: - if choice.answered: - return True - return False - - -def check_pool_exam(name, questions, force=None, seed=None): - _setup_questions(MAIN_REPORT) - # Choose a question - if force is None: - if seed is None: - force = MAIN_REPORT['questions']['seed'] - if isinstance(force, str): - force = _name_hash(force + name) - else: - force = seed - elif isinstance(force, str): - force = _name_hash(force + name) - question = questions[force % len(questions)] - # Ask it - show_question(question['instructions']) - # Check if they're done - if 'settings' not in question: - question['settings'] = {} - question['settings'][SETTING_SHOW_CASE_DETAILS] = False - results = list(load_question(question)) - if results: - message, label = results[0] - gently(message, label=label) diff --git a/src/lib/pedal/questions/design.md b/src/lib/pedal/questions/design.md deleted file mode 100644 index b38978ff92..0000000000 --- a/src/lib/pedal/questions/design.md +++ /dev/null @@ -1,92 +0,0 @@ -# Questions Tool - -The questions 
model flips the script of Feedback generation to also generate -instructions. It is assumed that an environment would provide some initial -meta-instruction, and that initial evaluation would generate some new question -specific instructions. - -Taxonomy: -* Question: A bundle of instructions and tests that can be delivered to a - student, not as feedback, but as a narrowing/modification of the - original problem. -* Pool: A collection of Questions that can be drawn from randomly to - individualize the student experiment. -* Instructions: The (HTML) text rendered to the learner to prepare them for - a question. The default is to assume that this would be static, - but more interesting possibilities could occur. -* Tests: The collection of feedback control logic that is bundled for this - question. This speaks to the idea of better encapsulation for - the control logic - perhaps it is time for the Organizers from - Assertions to be promoted to their own Tool? -* Seed: A value (expected to be constant for a given user) that can act as - an "offset" for selecting problems. This allows users to - deterministically receive feedback from a feedback engine. - Numeric seeds allow specifically selecting questions from the pool, - while String seeds are hashed to "random" indexes. An example is to - use student usernames, emails, or internal IDs (probably better to - treat numeric IDs as strings). - -# Delivery - -By default, new question text is delivered by the Resolver as feedback -that appears at the top of all the other feedback without preventing any -subsequent feedback, similar to the way Compliments do not prevent actual -feedback. - -However, Resolvers probably want to override this. For example, BlockPy would -probably want to modify the problem instructions area. VPL would probably -want to isolate the instructions to their own group or to provide a header with -them. - -# Timing - -Here are a few different models of the timing of questions: -1. The student requests initial feedback, and all questions appear. -2. The student indicates in some way which question they want to appear, - and that question's Instructions appear. -3. Students are given a single initial question, and when they complete it, - the text of a new question appears. -4. Instead of a single question appearing, the students are presented with - a choice of questions (see CYOA). - -# Random Pool - -Frequently, instructors need to be able to draw a question from a pool. - -A design principle based on research is that questions should be as equal -in difficulty and learning objectives as possible. Granted - there are -pedagogical design decisions that could justify breaking that guideline. -We should encourage question equivalency but allow the instructor to have -wildly different questions. - -In theory, question selection doesn't have to be random. Subclasses -could be created that draw on data sources about the user - these could -be local data sources ("You struggled a lot on the last question, so let's -try another one that's similar") or more exotic ("My records indicate that you -haven't really mastered IF statements, so let's do some practice with that.") - -# Templated Questions - -Being able to generate questions based on a template or some rules appears -to be a popular request. In the Random Pool model, we had a static set of -Questions. But now we have a series of rules for generating questions. - -TemplateQuestion is a class for creating questions from finite sets of terms. 
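One way to picture this with the existing API (Question, Pool, and set_seed from the questions/__init__.py removed earlier in this diff; the template text, terms, and grading function below are invented for illustration) is to expand a format string over a finite set of terms and wrap each result in a Question:

# Hypothetical sketch: generating a pool of questions from a template,
# using the Question/Pool classes that this diff deletes from pedal.questions.
from pedal.questions import Question, Pool, set_seed

TEMPLATE = "Define {function}(values) that returns the {stat} of a list."
TERMS = [
    {"function": "total_list", "stat": "sum"},
    {"function": "largest", "stat": "maximum"},
]

def make_grader(function_name):
    def grader(question):
        # Real instructor checks would go here; mark the question complete when satisfied.
        question.answer()
    return grader

generated = [Question("gen_{}".format(i), TEMPLATE.format(**terms),
                      [make_grader(terms["function"])])
             for i, terms in enumerate(TERMS)]

set_seed("student@example.edu")   # string seeds hash deterministically per pool
Pool("templated_pool", generated).choose().ask()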
-You provide a set of variables, some templated text (Python Format? Jinja2?), -and the set of values for variables. From this, questions could be automatically -generated. - -DynamicQuestion is a more general-purpose class for true dynamic generation of -problems. The model here would be to subclass and redefine components -by overriding methods. - -# CYOA - -One of the more interesting ideas is to support Choose-Your-Own-Adventure -style chains of questions. In this model, completing a question could unlock -multiple paths to move forward on. - -Open Questions: -* How do students indicate a choice along the path? -* How do we elegantly connect Decision A with Paths B, C, and D; keeping - in mind that game flow is a DAG or possibly even a graph. diff --git a/src/lib/pedal/questions/graders.py b/src/lib/pedal/questions/graders.py deleted file mode 100644 index 608b550cdd..0000000000 --- a/src/lib/pedal/questions/graders.py +++ /dev/null @@ -1,106 +0,0 @@ -from pedal.questions import QuestionGrader - -from pedal import run, compliment, explain, gently -from pedal.report.imperative import MAIN_REPORT -from pedal.assertions.assertions import * -from pedal.toolkit.functions import * - - -class FunctionGrader(QuestionGrader): - MAX_POINTS = 10 - DEFINITION_POINTS = 3 - COMPONENTS_POINTS = 1 - MAX_COMPONENTS_POINTS = 2 - UNIT_TEST_TYPE_POINTS = None - UNIT_TEST_VALUE_POINTS = None - UNIT_TEST_TOTAL_POINTS = 5 - UNIT_TEST_TYPE_RATIO = .5 - UNIT_TEST_COMPLETION_POINTS = 2 - - def __init__(self, function_name, signature, tests): - super().__init__() - self.function_name = function_name - self.signature = signature - self.tests = tests - self.points = 0 - - def _test(self, question): - defined = self.grade_definition(question) - - if not defined: - return self.report_status(question) - - self.grade_components(question) - - passed_tests = self.grade_unit_tests(question) - if not passed_tests: - return self.report_status(question) - - self.report_success(question) - - def report_status(self, question): - pass - - def report_success(self, question): - question.answer() - - def grade_definition(self, question): - self.student = run(report_exceptions=True, context=False) - self.student.report_exceptions_mode = False - - self.definition = match_signature_muted(self.function_name, *self.signature) - if not assertGenerally(self.definition): - gently("Function not defined") - return False - - if self.student.exception: - return False - if not assertHasFunction(self.student, self.function_name): - gently("Function defined incorrectly") - return False - - self.points += self.DEFINITION_POINTS - return True - - def grade_components(self, question): - self.component_points = 0 - components = self._get_functions_with_filter('grade_component_') - for component in components: - component(question) - self.component_points = min(self.component_points, self.MAX_COMPONENTS_POINTS) - self.points += self.component_points - - def assertEqual(self, *parameters): - return assertEqual(*parameters) - - def grade_unit_tests(self, question): - all_good = True - if self.UNIT_TEST_TOTAL_POINTS is None: - TYPE_POINT_ADD = self.UNIT_TEST_TYPE_POINTS - VALUE_POINT_ADD = self.UNIT_TEST_VALUE_POINTS - else: - ratio = self.UNIT_TEST_TYPE_RATIO - TYPE_POINT_ADD = (self.UNIT_TEST_TOTAL_POINTS / len(self.tests) * (ratio)) - VALUE_POINT_ADD = (self.UNIT_TEST_TOTAL_POINTS / len(self.tests) * (1 - ratio)) - for arguments, expected in self.tests: - # import sys - # print(repr(arguments), file=sys.stderr) - result = 
self.student.call(self.function_name, *arguments, context=False) - # print(repr(self.student.exception), file=sys.stderr) - if self.student.exception: - all_good = False - continue - if assertIsInstance(result, type(expected)): - self.points += TYPE_POINT_ADD - else: - all_good = False - continue - if self.assertEqual(result, expected): - self.points += VALUE_POINT_ADD - else: - all_good = False - if all_good: - self.points += self.UNIT_TEST_COMPLETION_POINTS - else: - gently("Failing instructor unit tests") - return all_good diff --git a/src/lib/pedal/questions/loader.py b/src/lib/pedal/questions/loader.py deleted file mode 100644 index 603c1de2f1..0000000000 --- a/src/lib/pedal/questions/loader.py +++ /dev/null @@ -1,496 +0,0 @@ -""" -instructions: blah blah blah - -settings: - tifa: - enabled: True - unit test by function (bool): Whether to test each function entirely before moving onto the - next one, or to first check that all functions have been defined, and then - checking their parameters, etc. Defaults to True. - show case details (bool): Whether to show the specific args/inputs that caused a test case - to fail. -rubric: - functions: - total: 100 - definition: 10 - signature: 10 - cases: 80 -global: - variables: - name: - type: - value: - inputs: - prints: -# Sandbox, type checking -functions: - documentation: "any" or "google" - coverage: 100% - tests: int - name: do_complicated_stuff - arity: int - signature: int, int -> float - signature: int, int, list[int], (int->str), dict[str:list[int]] -> list[int] - parameters: - name: banana - exactly: - regex: - includes: - within: - type: int - cases: - - arguments (list): 5, 4 - inputs (list): - returns (Any): - equals: 27.3 - is: - is not: _1 - name (str): Meaningful name for tracking purposes? Or possibly separate into label/id/code - hint (str): Message to display to user - prints: - exactly: - regex: - startswith: - endswith: - plots: -# Cait -syntax: - prevent: - ___ + ___ -# Override any of our default feedback messages -messages: - FUNCTION_NOT_DEFINED: "Oops you missed a function" -""" -from pedal.report.imperative import set_success, give_partial - -from pedal.sandbox.compatibility import _check_sandbox -from pedal.toolkit.printing import * -from pedal.toolkit.utilities import * -from pedal.toolkit.functions import * -from pedal.assertions.tests import equality_test - -SETTING_SHOW_CASE_DETAILS = "show case details" -DEFAULT_SETTINGS = { - SETTING_SHOW_CASE_DETAILS: True -} - -EXAMPLE_DATA = { - 'functions': [{ - 'name': 'do_complicated_stuff', - 'signature': 'int, int, [int] -> list[int]', - 'cases': [ - {'arguments': "5, 4, 3", 'returns': "12"}, - ] - }] -} - - -class FeedbackException(Exception): - def __init__(self, category, label, **fields): - self.category = category - self.label = label - self.fields = fields - - def as_message(self): - return FEEDBACK_MESSAGES[self.category][self.label].format(**self.fields) - - -def check_function_defined(function, function_definitions, settings=None): - # 1. Is the function defined syntactically? - # 1.1. With the right name? - function_name = function['name'] - if function_name not in function_definitions: - raise FeedbackException('toolkit', 'missing_function', function_name=function_name) - definition = function_definitions[function_name] - return definition - - -def check_function_signature(function, definition, settings=None): - function_name = function['name'] - # 1.2. With the right parameters and return type? 
- # 1.2.1 'arity' style - simply checks number of parameters - if 'arity' in function or 'parameters' in function: - expected_arity = function['arity'] if 'arity' in function else len(function['parameters']) - actual_arity = len(definition.args.args) - if actual_arity < expected_arity: - raise FeedbackException('toolkit', 'insufficient_args', - function_name=function_name, expected_arity=expected_arity, - actual_arity=actual_arity) - elif actual_arity > expected_arity: - raise FeedbackException('toolkit', 'excessive_args', - function_name=function_name, expected_arity=expected_arity, - actual_arity=actual_arity) - # 1.2.2 'parameters' style - checks each parameter's name and type - if 'parameters' in function: - expected_parameters = function['parameters'] - actual_parameters = definition.args.args - for expected_parameter, actual_parameter in zip(expected_parameters, actual_parameters): - actual_parameter_name = get_arg_name(actual_parameter) - if 'name' in expected_parameter: - if actual_parameter_name != expected_parameter['name']: - raise FeedbackException('toolkit', 'wrong_parameter_name', - function_name=function_name, - expected_parameter_name=expected_parameter['name'], - actual_parameter_name=actual_parameter_name - ) - if 'type' in expected_parameter: - actual_parameter_type = parse_type(actual_parameter) - # TODO: Handle non-string expected_parameter types (dict) - expected_parameter_type = parse_type_value(expected_parameter['type'], True) - if not type_check(expected_parameter_type, actual_parameter_type): - raise FeedbackException('toolkit', 'wrong_parameter_type', - function_name=function_name, - parameter_name=actual_parameter_name, - expected_parameter_type=expected_parameter_type, - actual_parameter_type=actual_parameter_type) - # 1.2.3. 'returns' style - checks the return type explicitly - if 'returns' in function: - expected_returns = parse_type_value(function['returns'], True) - actual_returns = parse_type(definition.returns) - if actual_returns != "None": - if not type_check(expected_returns, actual_returns): - raise FeedbackException("toolkit", "wrong_returns", - function_name=function_name, expected_returns=expected_returns, - actual_returns=actual_returns) - elif expected_returns != "None": - raise FeedbackException("toolkit", "missing_returns", - function_name=function_name, expected_returns=expected_returns) - # 1.2.4. 'signature' style - shortcut for specifying the types - if 'signature' in function: - expected_signature = function['signature'] - actual_returns = parse_type(definition.returns) - actual_parameters = ", ".join(parse_type(actual_parameter.annotation) - for actual_parameter in definition.args.args) - actual_signature = "{} -> {}".format(actual_parameters, actual_returns) - if not type_check(expected_signature, actual_signature): - raise FeedbackException("toolkit", "wrong_signature", - function_name=function_name, expected_signature=expected_signature, - actual_signature=actual_signature) - # All good here! - return True - - -def check_function_value(function, values, settings): - """ - 2. Does the function exist in the data? - - :param function: - :param values: - :param settings: - :return: - """ - function_name = function['name'] - # 2.1. Does the name exist in the values? - if function_name not in values: - raise FeedbackException("toolkit", "function_not_available", function_name=function_name) - function_value = values[function_name] - # 2.2. Is the name bound to a callable? 
- if not callable(function_value): - raise FeedbackException("toolkit", "name_is_not_function", function_name=function_name) - # All good here - return function_value - - -class TestCase: - CASE_COUNT = 0 - - def __init__(self, function_name, case_name): - self.function_name = function_name - if case_name is None: - self.case_name = str(TestCase.CASE_COUNT) - TestCase.CASE_COUNT += 1 - else: - self.case_name = case_name - self.arguments, self.has_arguments = [], False - self.inputs, self.has_inputs = [], False - self.error, self.has_error = None, False - self.message, self.has_message = None, False - self.expected_prints, self.has_expected_prints = None, False - self.expected_returns, self.has_expected_returns = None, False - self.prints = [] - self.returns = None - self.success = True - - def add_message(self, message): - self.message = message - self.has_message = True - - def add_inputs(self, inputs): - if not isinstance(inputs, list): - inputs = [inputs] - self.inputs = inputs - self.has_inputs = True - - def add_arguments(self, arguments): - if not isinstance(arguments, list): - arguments = [arguments] - self.arguments = arguments - self.has_arguments = True - - def add_error(self, error): - self.error = error - self.has_error = True - self.success = False - - def add_expected_prints(self, prints): - self.expected_prints = prints - self.has_expected_prints = True - - def add_expected_returns(self, returns): - self.expected_returns = returns - self.has_expected_returns = True - - def add_prints_returns(self, prints, returns): - self.prints = prints - self.returns = returns - - def fail(self): - self.success = False - - -def check_case(function, case, student_function): - """ - - :param function: - :param case: - :param student_function: - :return: status, arg, input, error, output, return, message - """ - function_name = function['name'] - test_case = TestCase(function_name, case.get('name')) - # Get callable - sandbox = _check_sandbox(MAIN_REPORT) - sandbox.set_output(None) - # Potential bonus message - if 'message' in case: - test_case.add_message(case['message']) - # Queue up the the inputs - if 'inputs' in case: - test_case.add_inputs(case['inputs']) - sandbox.set_input(test_case.inputs) - else: - sandbox.set_input(None) - # Pass in the arguments and call the function - if 'arguments' in case: - test_case.add_arguments(case['arguments']) - result = sandbox.call(function_name, *test_case.arguments, - report_exceptions=False, context=False) - # Store actual values - test_case.add_prints_returns(sandbox.output, result) - # Check for errors - if sandbox.exception: - test_case.add_error(sandbox.exception) - # 4. Check out the output - if 'prints' in case: - test_case.add_expected_prints(case['prints']) - if not output_test(sandbox.output, case['prints'], False, .0001): - test_case.fail() - # 5. Check the return value - if 'returns' in case: - test_case.add_expected_returns(case['returns']) - if not equality_test(result, case['returns'], True, .0001): - test_case.fail() - # TODO: Check the plots - # Return results - return test_case - - -# TODO: blockpy-feedback-unit => pedal-test-cases in BlockPy Client -TEST_TABLE_TEMPLATE = """ - - - - - - - {body} -
    ArgumentsExpectedReturned
    """ -TEST_TABLE_FOOTER = "" -TEST_TABLE_ROW_HEADER = "" -TEST_TABLE_ROW_NORMAL = "" -TEST_TABLE_ROW_FOOTER = "" -TEST_TABLE_ROW_INFO = "" -GREEN_CHECK = " ✔" -RED_X = " ❌" -CODE_CELL = " {}" -COLUMN_TITLES = ["", "Arguments", "Inputs", "Errors", "Expected", "Expected", "Returned", "Printed"] - - -def make_table(cases): - body = [] - for case in cases: - body.append(" ") - body.append(GREEN_CHECK if case.success else RED_X) - body.append(CODE_CELL.format(", ".join(repr(arg) for arg in case.arguments))) - if case.has_error: - body.append(" Error: {}".format(str(case.error))) - else: - body.append(CODE_CELL.format(repr(case.expected_returns))) - body.append(CODE_CELL.format(repr(case.returns))) - if not case.success and case.has_message: - body.append(" {}".format(case.message)) - body.append(" ") - body = "\n".join(body) - return TEST_TABLE_TEMPLATE.format(body=body) - # if ((any(args) and any(inputs)) or - # (any(expected_outputs) and any(expected_returns)) or - # (any(actual_outputs) and any(actual_returns))): - # # Complex cells - # pass - # else: - # Simple table - # Make header - - # row_mask = [True, any(args), any(inputs), False, - # any("returns" in reason for reason in reasons), - # any("prints" in reason for reason in reasons), - # any("returns" in reason for reason in reasons), - # any("prints" in reason for reason in reasons)] - # header_cells = "".join("{}".format(title) for use, title in zip(row_mask, COLUMN_TITLES) if use) - # body = [TEST_TABLE_ROW_HEADER.format(header_cells)] - # for case in zip( - # statuses, args, inputs, errors, actual_outputs, actual_returns, - # expected_outputs, expected_returns): - # status, case = case[0], case[1:] - # print(row_mask[1:], case) - # def make_code(values): - # if values == None: - # return "None" - # elif isinstance(values, int): - # return "{!r}".format(values) - # else: - # return ", ".join("{}".format(repr(value)) for value in values) - # body.append( - # TEST_TABLE_ROW_NORMAL+ - # (GREEN_CHECK if case[0] else RED_X)+ - # "\n".join(" {}".format(make_code(values)) - # for use, values in zip(row_mask[1:], case) if use)+ - # "\n" - # ) - # # Make each row - # table = "{}\n{}\n{}".format(TEST_TABLE_HEADER, "\n ".join(body), TEST_TABLE_FOOTER) - # return table - - -def check_cases(function, student_function, settings): - function_name = function['name'] - if 'cases' in function: - cases = function['cases'] - test_cases = [check_case(function, case, student_function) for case in cases] - success_cases = sum(test.success for test in test_cases) - if success_cases < len(cases): - if settings[SETTING_SHOW_CASE_DETAILS]: - table = make_table(test_cases) - raise FeedbackException("toolkit", "failed_test_cases", - function_name=function_name, - cases_count=len(cases), failure_count=len(cases) - success_cases, - table=table) - else: - raise FeedbackException("toolkit", "failed_test_cases_count", - function_name=function_name, - cases_count=len(cases), failure_count=len(cases) - success_cases) - - -def get_arg_name(node): - name = node.id - if name is None: - return node.arg - else: - return name - - -def load_question(data): - """ - - :param data: - :return: - """ - ast = parse_program() - student = compatibility.get_student_data() - # Check that there aren't any invalid syntactical structures - # Get all of the function ASTs in a dictionary - function_definitions = {definition._name: definition - for definition in ast.find_all("FunctionDef")} - settings = DEFAULT_SETTINGS.copy() - settings.update(data.get('settings', {})) - rubric = 
settings.get('rubric', {}) - function_points = 0 - if 'functions' in data: - function_rubric = rubric.get('functions', {}) - successes = [] - for function in data['functions']: - success = False - try: - definition = check_function_defined(function, function_definitions, settings) - function_points += function_rubric.get('definition', 10) - check_function_signature(function, definition, settings) - function_points += function_rubric.get('signature', 10) - student_function = check_function_value(function, student.data, settings) - function_points += function_rubric.get('value', 0) - except FeedbackException as fe: - yield fe.as_message(), fe.label - else: - try: - check_cases(function, student_function, settings) - except FeedbackException as fe: - success_ratio = (1.0 - fe.fields['failure_count'] / fe.fields['cases_count']) - function_points += function_rubric.get('cases', 80 * success_ratio) - yield fe.as_message(), fe.label - else: - function_points += function_rubric.get('cases', 80) - success = True - successes.append(success) - function_points /= len(data['functions']) - if all(successes): - set_success() - else: - give_partial(function_points) - - -def check_question(data): - results = list(load_question(data)) - if results: - message, label = results[0] - gently(message, label=label) - - -def check_pool(questions): - pass - - -def load_file(filename): - pass - - -FEEDBACK_MESSAGES = { - "toolkit": { - "missing_function": "No function named `{function_name}` was found.", - "insufficient_args": ("The function named `{function_name}` " - "has fewer parameters ({actual_arity}) " - "than expected ({expected_arity})."), - "excessive_args": ("The function named `{function_name}` " - "has more parameters ({actual_arity}) " - "than expected ({expected_arity})."), - # TODO: missing_parameter that checks if parameter name exists, but is in the wrong place - "wrong_parameter_name": ("Error in definition of `{function_name}`. " - "Expected a parameter named `{expected_parameter_name}`, " - "instead found `{actual_parameter_name}`."), - "wrong_parameter_type": ("Error in definition of function `{function_name}` " - "parameter `{parameter_name}`. Expected `{expected_parameter_type}`, " - "instead found `{actual_parameter_type}`."), - "missing_returns": ("Error in definition of function `{function_name}` return type. " - "Expected `{expected_returns}`, but there was no return type specified."), - "wrong_returns": ("Error in definition of function `{function_name}` return type. " - "Expected `{expected_returns}`, instead found `{actual_returns}`."), - "wrong_signature": ("Error in definition of function `{function_name}` signature. " - "Expected `{expected_signature}`, instead found `{actual_signature}`."), - "name_is_not_function": "You defined `{function_name}`, but did not define it as a function.", - "function_not_available": ("You defined `{function_name}` somewhere in your code, " - "but it was not available in the top-level scope to be called. " - "Perhaps you defined it inside another function or scope?"), - "failed_test_cases": ("I ran your function {function_name} on my own test cases. " - "It failed {failure_count}/{cases_count} of my tests.\n{table}"), - "failed_test_cases_count": ("I ran your function {function_name} on my own test cases. 
" - "It failed {failure_count}/{cases_count} of my tests."), - } -} diff --git a/src/lib/pedal/questions/setup.py b/src/lib/pedal/questions/setup.py deleted file mode 100644 index d5308872a9..0000000000 --- a/src/lib/pedal/questions/setup.py +++ /dev/null @@ -1,42 +0,0 @@ -from pedal.report.imperative import MAIN_REPORT - -import hashlib - - -def _name_hash(name): - return hashlib.md5(name.encode('utf8')).digest()[0] - - -def _setup_questions(report): - ''' - Initialize any necessary fields for the report's question tool. - - Args: - report (Report): The report object to store data and feedback in. - ''' - if 'questions' not in report: - report['questions'] = { - 'seed': 0 - } - - -def set_seed(seed_value, report=None): - ''' - Sets the seed that will be used in selecting questions. - - Args: - seed_value (int or str or iterable[int]): The value to use when - selecting questions, deterministically. If int, the same index - will be used for all questions. If an iterable of ints, each - one will serve as the index for the corresponding problem (throws - an exception if the iterable isn't long enough). If a string, - it will be hashed to a value (the hash is deterministic across - platforms) that will be modulo'd to be in the right range for the - pool. Presently, hashing generates values from [0, 256) so you - need to limit your questions to 256. - report (Report): The report object to store data and feedback in. If - left None, defaults to the global MAIN_REPORT. - ''' - if report is None: - report = MAIN_REPORT - report['questions']['seed'] = seed_value diff --git a/src/lib/pedal/report/__init__.py b/src/lib/pedal/report/__init__.py deleted file mode 100644 index 250ba0da0c..0000000000 --- a/src/lib/pedal/report/__init__.py +++ /dev/null @@ -1,8 +0,0 @@ -""" -The collection of classes and functions used to store the fundamental Report -and Feedback objects. -""" - -from pedal.report.report import Report -from pedal.report.feedback import Feedback -from pedal.report.imperative import * diff --git a/src/lib/pedal/report/feedback.py b/src/lib/pedal/report/feedback.py deleted file mode 100644 index 9ccb53aaef..0000000000 --- a/src/lib/pedal/report/feedback.py +++ /dev/null @@ -1,102 +0,0 @@ -""" -Simple data classes for storing feedback to present to learners. -""" - -__all__ = ['Feedback'] - - -class Feedback: - """ - A class for storing raw feedback. - - Attributes: - label (str): An internal name for this specific piece of feedback. - tool (str): An internal name for indicating the tool that created - this feedback. - category (str): A human-presentable name showable to the learner. - More than one Feedback will be in a category, most - likely. - priority (str): An indication of how important this feedback is. - Might be "high/medium/low" or the name of a - category (tool?) to supersede. Exactly how this gets - used is up to the resolver. A special kind of priority - is "positive" - which indicates that this feedback is - positive, and the information is good to convey to the - student. - group (int or str): The group that this piece of feedback should be - associated with. Some resolvers want to group feedback using this - identifier. - result (bool): Whether or not this feedback is associated with the - learner completing the task ("Success!"). - performance (float): A relative amount that this feedback contributes - to the students' performance (think in terms of - partial credit, like "Triggering this feedback - is worth 20% (.2)"). 
- misconceptions (Message): A description of the misconception that - is believed to be in the student's mind, - or perhaps the relevant concept from the - material that should be associated with - this. ("Variables must be initialized - before they are used"). - mistakes (Message): A description of the error or bug that the - student has created ("NameError on line 5: sum - has not been defined"). - hints (Message): A suggestion for what the student can do - ("Initialize the sum variable on line 2"). - constraints (Message): A description of the task requirements or - task type that the student has violated - ("You used a for loop, but this question - expected you to use recursion."). - metacognitives (Message): A suggestion for more regulative - strategies ("You have been working for - 5 hours, perhaps it is time to take - a break?"). - """ - MESSAGE_TYPES = ['hint', 'mistake', 'misconception', - 'constraint', 'metacognitive'] - - def __init__(self, label, tool='instructor', - category='Instructor feedback', priority=None, group=None, - result=None, performance=None, misconception=None, - mistake=None, hint=None, constraint=None, - metacognitive=None): - # Metadata - self.label = label - self.tool = tool - self.category = category - self.priority = priority - self.group = group - # Data - self.result = result - self.performance = performance - self.misconception = misconception - self.mistake = mistake - self.hint = hint - self.constraint = constraint - self.metacognitive = metacognitive - - def __str__(self): - return "".format(self.label) - - def __repr__(self): - metadata = "" - if self.tool is not None: - metadata += ", tool=" + self.tool - if self.category is not None: - metadata += ", category=" + self.category - if self.priority is not None: - metadata += ", priority=" + self.priority - if self.group is not None: - metadata += ", group=" + str(self.group) - data = "" - return "Feedback({}{}{})".format(self.label, metadata, data) - - -""" -A Message is one of: - str - Dict with a `message` field and any other suitable fields, such as: - html_message: An HTML message instead of a plaintext message. - line: The line number to highlight - error: The error message to render -""" diff --git a/src/lib/pedal/report/imperative.py b/src/lib/pedal/report/imperative.py deleted file mode 100644 index ef8cb76839..0000000000 --- a/src/lib/pedal/report/imperative.py +++ /dev/null @@ -1,94 +0,0 @@ -""" -Imperative style commands for constructing feedback in a convenient way. -Uses a global report object (MAIN_REPORT). -""" - -__all__ = ['set_success', 'compliment', 'give_partial', 'explain', 'explain_r', - 'gently', 'gently_r', 'hide_correctness', 'suppress', 'log', 'debug', - 'clear_report', 'get_all_feedback', 'MAIN_REPORT', 'guidance'] - -from pedal.report.report import Report - -#: The global Report object. Meant to be used as a default singleton -#: for any tool, so that instructors do not have to create their own Report. -#: Of course, all APIs are expected to work with a given Report, and only -#: default to this Report when no others are given. -MAIN_REPORT = Report() - - -def set_success(): - """ - Creates Successful feedback for the user, indicating that the entire - assignment is done. - """ - MAIN_REPORT.set_success() - - -def compliment(message, line=None): - """ - Create a positive feedback for the user, potentially on a specific line of - code. - - Args: - message (str): The message to display to the user. - line (int): The relevant line of code to reference. 
- """ - MAIN_REPORT.compliment(message, line) - - -def give_partial(value, message=None): - """ - Increases the user's current score by the `value`. Optionally display - a positive message too. - - Args: - value (number): The number to increase the user's score by. - message (str): The message to display to the user. - """ - MAIN_REPORT.give_partial(value, message) - - -def explain(message, priority='medium', line=None, label='explain'): - MAIN_REPORT.explain(message, priority, line, label=label) - - -def guidance(message, priority='medium', line=None, label='Guidance'): - MAIN_REPORT.guidance(message, priority, line, label=label) - - -def gently(message, line=None, label='explain'): - MAIN_REPORT.gently(message, line, label=label) - - -def gently_r(message, code, line=None, label="explain"): - gently(message + "

    <br><br>({})<br><br>
    ".format(code), line, label=label) - return message - - -def explain_r(message, code, priority='medium', line=None, label="explain"): - explain(message + "

    <br><br>({})<br><br>
    ".format(code), priority, line, label=label) - return message - - -def hide_correctness(): - MAIN_REPORT.hide_correctness() - - -def suppress(category, label=True): - MAIN_REPORT.suppress(category, label) - - -def log(message): - MAIN_REPORT.log(message) - - -def debug(message): - MAIN_REPORT.debug(message) - - -def clear_report(): - MAIN_REPORT.clear() - - -def get_all_feedback(): - return MAIN_REPORT.feedback diff --git a/src/lib/pedal/report/report.py b/src/lib/pedal/report/report.py deleted file mode 100644 index 2b14bd30df..0000000000 --- a/src/lib/pedal/report/report.py +++ /dev/null @@ -1,164 +0,0 @@ -from pedal.report.feedback import Feedback - -__all__ = ['Report'] - - -class Report: - """ - A class for storing Feedback generated by Tools, along with any auxiliary - data that the Tool might want to provide for other tools. - - Attributes: - feedback (list of Feedback): The raw feedback generated for this Report - so far. - suppressions (list of tuple(str, str)): The categories and labels that - have been suppressed so far. - group (int or str): The label for the current group. Feedback given - by a Tool will automatically receive the current `group`. This - is used by the Source tool, for example, in order to group feedback - by sections. - group_names (dict[group:str]): A printable, student-facing name for the - group. When a group needs to be rendered out to the user, this - will override whatever label was going to be presented instead. - group_order (sequence or callable or None): The mechanism to use to - order groups. If a sequence, the order will be inferred based on - the order of elements in the sequence. If a callable, the callable - will be used as a key function for `sort`. If `None`, then defaults - to the natural ordering of the groups. Defaults to `None`. - hooks (dict[str: list[callable]): A dictionary mapping events to - a list of callable functions. Tools can register functions on - hooks to have them executed when the event is triggered by another - tool. For example, the Assertions tool has hooks on the Source tool - to trigger assertion resolutions before advancing to next sections. - _results (dict of str => any): Maps tool names to their data. The - namespace for a tool can be used to - store whatever they want, but will - probably be in a dictionary itself. - """ - group_order = None - - def __init__(self): - """ - Creates a new Report instance. - """ - self.clear() - - def clear(self): - self.feedback = [] - self.suppressions = {} - self._results = {} - self.group = None - self.group_names = {} - self.hooks = {} - - def set_success(self, group=None): - """ - Creates Successful feedback for the user, indicating that the entire - assignment is done. 
- """ - if group is None: - group = self.group - self.feedback.append(Feedback('set_success', priority='positive', - result=True, group=group)) - - def give_partial(self, value, message=None, group=None): - if value is None: - return False - if group is None: - group = self.group - self.feedback.append(Feedback('give_partial', performance=value, - priority='positive', - group=group, - mistake=message)) - return True - - def hide_correctness(self): - self.suppressions['success'] = [] - - def explain(self, message, priority='medium', line=None, group=None, - label='explain'): - misconception = {'message': message} - if line is not None: - misconception['line'] = line - if group is None: - group = self.group - self.attach(label, priority=priority, category='instructor', - group=group, misconception=misconception) - - def gently(self, message, line=None, group=None, label='explain'): - self.explain(message, priority='student', line=line, group=group, - label=label) - - def guidance(self, message, line=None, group=None, label='guidance'): - hint = {'message': message} - if line is not None: - hint['line'] = line - if group is None: - group = self.group - self.attach(label, priority='instructions', category='instructions', group=group, hint=hint) - - def compliment(self, message, line=None, group=None, label='explain'): - self.explain(message, priority='positive', line=line, group=group, - label=label) - - def attach(self, label, **kwargs): - self.feedback.append(Feedback(label, **kwargs)) - - def log(self, message): - pass - - def debug(self, message): - pass - - def suppress(self, category, label=True, where=True): - """ - Args: - category (str): The category of feedback to suppress. - label (str): A specific label to match against and suppress. - where (bool or group): Which group of report to localize the - suppression to. If instead `True` is passed, the suppression - occurs in every group globally. - TODO: Currently, only global suppression is supported. - """ - category = category.lower() - if isinstance(label, str): - label = label.lower() - if category not in self.suppressions: - self.suppressions[category] = [] - self.suppressions[category].append(label) - - def add_hook(self, event, function): - """ - Register the `function` to be executed when the given `event` is - triggered. - - Args: - event (str): An event name. Multiple functions can be triggered for - the same `event`. The format is as follows: - "pedal.module.function.extra" - - The `".extra"` component is optional to add further nuance, but - the general idea is that you are referring to functions that, - when called, should trigger other functions to be called first. - function (callable): A callable function. 
This function should - accept a keyword parameter named `report`, which will - """ - if event not in self.hooks: - self.hooks[event] = [] - self.hooks[event].append(function) - - def execute_hooks(self, event): - if event in self.hooks: - for function in self.hooks[event]: - function(report=self) - - def __getitem__(self, key): - if key not in self._results: - self._results[key] = {} - return self._results[key] - - def __setitem__(self, key, value): - self._results[key] = value - - def __contains__(self, key): - return key in self._results diff --git a/src/lib/pedal/resolvers/__init__.py b/src/lib/pedal/resolvers/__init__.py deleted file mode 100644 index 0bbf9c8536..0000000000 --- a/src/lib/pedal/resolvers/__init__.py +++ /dev/null @@ -1,19 +0,0 @@ -""" - -Resolver Types - -Does there need to be some kind of hook for Tools to wrap up their business? - -Simple - Find the highest priority feedback and show that, along with any positive feedback. - -Sectional - Find the highest priority feedback for each section, and show that along with any positive feedback. - -Full - Report all feedback, grouped by tool/category/priority/time. - -Full Summary - Report all feedback but divided into frequencies of labels grouped by tool/category/priority/time. - -""" diff --git a/src/lib/pedal/resolvers/core.py b/src/lib/pedal/resolvers/core.py deleted file mode 100644 index 07a0b696e2..0000000000 --- a/src/lib/pedal/resolvers/core.py +++ /dev/null @@ -1,22 +0,0 @@ -from pedal.report.imperative import MAIN_REPORT - - -def make_resolver(func, report=None): - ''' - Decorates the given function as a Resolver. This means that when the - function is executed, the `"pedal.resolver.resolve"` event will be - triggered. - - Args: - func (callable): The function to decorate. - report (Report): The Report to trigger the event on. If None, then use - the `MAIN_REPORT`. - ''' - if report is None: - report = MAIN_REPORT - - def resolver_wrapper(): - report.execute_hooks("pedal.resolvers.resolve") - return func() - - return resolver_wrapper diff --git a/src/lib/pedal/resolvers/readme.md b/src/lib/pedal/resolvers/readme.md deleted file mode 100644 index 8b244293ae..0000000000 --- a/src/lib/pedal/resolvers/readme.md +++ /dev/null @@ -1,3 +0,0 @@ -# Resolvers - -A tool for selecting and managing reported data from other tools, in order to select a relevant piece of feedback. \ No newline at end of file diff --git a/src/lib/pedal/resolvers/sectional.py b/src/lib/pedal/resolvers/sectional.py deleted file mode 100644 index c3fae486d2..0000000000 --- a/src/lib/pedal/resolvers/sectional.py +++ /dev/null @@ -1,77 +0,0 @@ -import sys - -from pedal.resolvers import simple -from pedal.report import MAIN_REPORT - - -def resolve(report=None, priority_key=None): - """ - Args: - report (Report): The report object to resolve down. 
Defaults to the - global MAIN_REPORT - - Returns - str: A string of HTML feedback to be delivered - """ - if report is None: - report = MAIN_REPORT - if priority_key is None: - priority_key = simple.by_priority - # Prepare feedbacks - feedbacks = report.feedback - feedbacks.sort(key=lambda f: (f.group or 0, priority_key(f))) - suppressions = report.suppressions - # Process - final_success = False - final_score = 0 - finals = {} - found_failure = False - for feedback in feedbacks: - group = feedback.group or 0 - category = feedback.category.lower() - if category in suppressions: - if True in suppressions[category]: - continue - elif feedback.label.lower() in suppressions[category]: - continue - success, partial, message, data = simple.parse_feedback(feedback) - final_success = success or final_success - final_score += partial - if message is not None: - # print("RESETING GROUP", group, message[:20], found_failure, feedback.priority) - if group not in finals: - finals[group] = [] - found_failure = False - if feedback.priority not in ('positive', 'instructions'): - if found_failure: - continue - found_failure = True - entry = {'label': feedback.label, - 'message': message, - 'category': feedback.category, - 'priority': feedback.priority, - 'data': data} - if feedback.priority == 'instructions': - # Find end of instructions - index = 0 - for feedback in finals[group]: - if feedback['priority'] != 'instructions': - break - index += 1 - finals[group].insert(index, entry) - elif feedback.priority != 'positive': - finals[group].insert(0, entry) - else: - finals[group].append(entry) - # from pprint import pprint - # pprint(finals) - final_hide_correctness = suppressions.get('success', False) - if not finals: - finals[0] = [{ - 'label': 'No errors', - 'category': 'Instructor', - 'data': [], - 'priority': 'medium', - 'message': "No errors reported." - }] - return (final_success, final_score, final_hide_correctness, finals) diff --git a/src/lib/pedal/resolvers/simple.py b/src/lib/pedal/resolvers/simple.py deleted file mode 100644 index 59222f2cea..0000000000 --- a/src/lib/pedal/resolvers/simple.py +++ /dev/null @@ -1,156 +0,0 @@ -from pedal.report import MAIN_REPORT, Feedback -from pedal.resolvers.core import make_resolver - -DEFAULT_CATEGORY_PRIORITY = [ - 'syntax', - 'mistakes', - 'instructor', - 'analyzer', - 'runtime', - 'student', - 'positive', - 'instructions', - 'uncategorized', -] - -# For compatibility with the old feedback API -LEGACY_CATEGORIZATIONS = { - # 'student': 'runtime', - 'parser': 'syntax', - 'verifier': 'syntax', - 'instructor': 'instructor' -} - - -def by_priority(feedback): - """ - Converts a feedback into a numeric representation for sorting. - - Args: - feedback (Feedback): The feedback object to convert - Returns: - float: A decimal number representing the feedback's relative priority. 
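As a quick illustration of the ordering this produces (a sketch, not part of the original module; it assumes Feedback accepts the same label/category/priority arguments that Report.attach passes elsewhere in this diff):

    from pedal.report import Feedback
    from pedal.resolvers.simple import by_priority

    syntax_issue = Feedback('unclosed_paren', category='syntax', priority='high')
    runtime_note = Feedback('off_by_one', category='runtime', priority='low')
    # 'syntax' appears before 'runtime' in DEFAULT_CATEGORY_PRIORITY, and 'high'
    # maps to a smaller offset than 'low', so the syntax feedback sorts first.
    assert by_priority(syntax_issue) < by_priority(runtime_note)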
- """ - category = 'uncategorized' - if feedback.category is not None: - category = feedback.category.lower() - priority = 'medium' - if feedback.priority is not None: - priority = feedback.priority.lower() - priority = LEGACY_CATEGORIZATIONS.get(priority, priority) - if category in DEFAULT_CATEGORY_PRIORITY: - value = DEFAULT_CATEGORY_PRIORITY.index(category) - else: - value = len(DEFAULT_CATEGORY_PRIORITY) - offset = .5 - if priority == 'low': - offset = .7 - elif priority == 'high': - offset = .3 - elif priority not in ('low', 'medium', 'high'): - if priority in DEFAULT_CATEGORY_PRIORITY: - value = DEFAULT_CATEGORY_PRIORITY.index(priority) - offset = .1 - return value + offset - - -def parse_message(component): - if isinstance(component, str): - return component - elif isinstance(component, list): - return '
    \n'.join(parse_message(c) for c in component) - elif isinstance(component, dict): - if "html" in component: - return component["html"] - elif "message" in component: - return component["message"] - else: - raise ValueError("Component has no message field: " + str(component)) - else: - raise ValueError("Invalid component type: " + str(type(component))) - - -def parse_data(component): - if isinstance(component, str): - return [{'message': component}] - elif isinstance(component, list): - return component - elif isinstance(component, dict): - return [component] - - -def parse_feedback(feedback): - # Default returns - success = False - performance = 0 - message = None - data = [] - # Actual processing - for feedback_type in Feedback.MESSAGE_TYPES: - feedback_value = getattr(feedback, feedback_type) - if feedback_value is not None: - data.extend(parse_data(feedback_value)) - parsed_message = parse_message(feedback_value) - if parsed_message is not None: - message = parsed_message - if feedback.result is not None: - success = feedback.result - if feedback.performance is not None: - performance = feedback.performance - return success, performance, message, data - - -@make_resolver -def resolve(report=None, priority_key=None): - """ - Args: - report (Report): The report object to resolve down. Defaults to the - global MAIN_REPORT - - Returns - str: A string of HTML feedback to be delivered - """ - if report is None: - report = MAIN_REPORT - if priority_key is None: - priority_key = by_priority - # Prepare feedbacks - feedbacks = report.feedback - feedbacks.sort(key=priority_key) - suppressions = report.suppressions - # Process - final_success = False - final_score = 0 - final_message = None - final_category = 'Instructor' - final_label = 'No errors' - final_data = [] - for feedback in feedbacks: - category = feedback.category.lower() - if category in suppressions: - if True in suppressions[category]: - continue - elif feedback.label.lower() in suppressions[category]: - continue - success, partial, message, data = parse_feedback(feedback) - final_success = success or final_success - final_score += partial - if (message is not None and - final_message is None and - feedback.priority != 'positive'): - final_message = message - final_category = feedback.category - final_label = feedback.label - final_data = data - if final_message is None: - final_message = "No errors reported." - final_hide_correctness = suppressions.get('success', False) - if (not final_hide_correctness and final_success and - final_label == 'No errors' and - final_category == 'Instructor'): - final_category = 'Complete' - final_label = 'Complete' - final_message = "Great work!" 
- return (final_success, final_score, final_category, - final_label, final_message, final_data, - final_hide_correctness) diff --git a/src/lib/pedal/sandbox/__init__.py b/src/lib/pedal/sandbox/__init__.py deleted file mode 100644 index 212322d314..0000000000 --- a/src/lib/pedal/sandbox/__init__.py +++ /dev/null @@ -1,34 +0,0 @@ -from pedal.report import MAIN_REPORT -from pedal.sandbox.sandbox import Sandbox, DataSandbox - -# Compatibility API -''' -run_student -queue_input -reset_output -get_output -''' - - -def reset(report=None): - if report is None: - report = MAIN_REPORT - report['sandbox']['run'] = Sandbox(filename=report['source']['filename']) - - -def run(raise_exceptions=True, report=None, coverage=False, threaded=False, inputs=None): - if report is None: - report = MAIN_REPORT - if 'run' not in report['sandbox']: - report['sandbox']['run'] = Sandbox(filename=report['source']['filename'], threaded=threaded) - sandbox = report['sandbox']['run'] - source_code = report['source']['code'] - sandbox.record_coverage = coverage - sandbox.run(source_code, _as_filename=report['source']['filename'], _inputs=inputs) - if raise_exceptions and sandbox.exception is not None: - name = str(sandbox.exception.__class__)[8:-2] - report.attach(name, category='Runtime', tool='Sandbox', - section=report['source']['section'], - mistakes={'message': sandbox.format_exception(), - 'error': sandbox.exception}) - return sandbox diff --git a/src/lib/pedal/sandbox/compatibility.py b/src/lib/pedal/sandbox/compatibility.py deleted file mode 100644 index a7c1a07b31..0000000000 --- a/src/lib/pedal/sandbox/compatibility.py +++ /dev/null @@ -1,124 +0,0 @@ -import sys - -from pedal.sandbox.sandbox import Sandbox -from pedal.sandbox.messages import EXTENDED_ERROR_EXPLANATION - -from pedal.report import MAIN_REPORT, Feedback - - -def _check_sandbox(report): - if 'run' not in report['sandbox']: - report['sandbox']['run'] = Sandbox() - return report['sandbox']['run'] - - -def run_student(raise_exceptions=False, report=None, old_style_messages=False): - if report is None: - report = MAIN_REPORT - sandbox = _check_sandbox(report) - source_code = report['source']['code'] - filename = report['source']['filename'] - sandbox.run(source_code, as_filename=filename, report_exceptions=not raise_exceptions) - if raise_exceptions: - raise_exception(sandbox.exception, sandbox.exception_position, - report=report, message=None if old_style_messages else sandbox.exception_formatted) - return sandbox.exception - - -def queue_input(*inputs, **kwargs): - if 'report' not in kwargs: - report = MAIN_REPORT - else: - report = kwargs['report'] - sandbox = _check_sandbox(report) - sandbox.set_input(inputs) - - -def reset_output(report=None): - if report is None: - report = MAIN_REPORT - sandbox = _check_sandbox(report) - sandbox.set_output(None) - - -def get_output(report=None): - if report is None: - report = MAIN_REPORT - sandbox = _check_sandbox(report) - return sandbox.output - - -def get_plots(report=None): - if report is None: - report = MAIN_REPORT - sandbox = _check_sandbox(report) - if 'matplotlib.pyplot' in sandbox.modules: - mock_plt = sandbox.modules['matplotlib.pyplot'] - if hasattr(mock_plt, 'plots'): - return mock_plt.plots - return [] - - -def capture_output(function, *args, **kwargs): - if 'report' in kwargs: - report = kwargs['report'] - else: - report = MAIN_REPORT - sandbox = _check_sandbox(report) - sandbox.set_output(None) - sandbox.call(function.__name__, *args) - return sandbox.output - - -def get_sandbox(report=None): - if 
report is None: - report = MAIN_REPORT - sandbox = _check_sandbox(report) - return sandbox - - -def raise_exception(exception, position=None, report=None, message=None): - if report is None: - report = MAIN_REPORT - sandbox = _check_sandbox(report) - if exception is None: - return - extended = EXTENDED_ERROR_EXPLANATION.get(exception.__class__, "") - if message is None: - message = "
    {}
    \n{}".format(str(exception), extended) - # Skulpt compatible name lookup - name = str(exception.__class__)[8:-2] - report.attach(name, category='Runtime', tool='Sandbox', - mistake={'message': message, - 'error': exception, - 'position': position, - 'traceback': None}) - sandbox.exception = exception - - -def get_student_data(report=None): - if report is None: - report = MAIN_REPORT - sandbox = _check_sandbox(report) - return sandbox - - -def set_sandbox(sandbox, report=None): - """ - Update the sandbox to hold the new sandbox instance. Particularly useful - for Skulpt, which needs to set the sandbox in an unusual way. - """ - if report is None: - report = MAIN_REPORT - report['sandbox']['run'] = sandbox - return sandbox - - -def trace_lines(report=None): - if report is None: - report = MAIN_REPORT - sandbox = _check_sandbox(report) - if sandbox.tracer_style == 'coverage': - return sandbox.trace.lines - sandbox.trace.missing - else: - return [] diff --git a/src/lib/pedal/sandbox/exceptions.py b/src/lib/pedal/sandbox/exceptions.py deleted file mode 100644 index ccae0bc6cc..0000000000 --- a/src/lib/pedal/sandbox/exceptions.py +++ /dev/null @@ -1,191 +0,0 @@ -import traceback -import os -import sys - -try: - TimeoutError -except NameError: - class TimeoutError(Exception): - pass - - -class SandboxException(Exception): - """ - Generic base exception for sandbox errors. - """ - - -class SandboxStudentCodeException(SandboxException): - """ - Caused by an error in student code - """ - - def __init__(self, actual): - self.actual = actual - - -class SandboxPreventModule(Exception): - """ - Caused by student attempting to load a module that they shouldn't. - """ - - -class SandboxHasNoFunction(SandboxException): - """ - Caused by attempting to access a function that the student hasn't created. - """ - - -class SandboxHasNoVariable(SandboxException): - """ - Caused by attempting to access a variable that the student hasn't created. - """ - - -class SandboxNoMoreInputsException(Exception): - """ - Caused by the student calling `input` when the instructor hasn't provided - enough inputs. Typically, the student has an infinite loop around their - `input` function. - """ - - -BuiltinKeyError = KeyError - - -class KeyError(BuiltinKeyError): - """ - A version of KeyError that replaces the built-in with one small - modification: when printing an explanatory message, the message is not - rendered as a tuple. Because that's stupid and the fact that it made it - into CPython is just rude. - - See Also: - https://github.com/python/cpython/blob/master/Objects/exceptions.c#L1556 - """ - __module__ = "builtins" - - def __init__(self, original, message): - for field in ['__cause__', '__traceback__', '__context__']: - if hasattr(original, field): - setattr(self, field, getattr(original, field)) - else: - setattr(self, field, None) - self.message = message - - def __str__(self): - return self.message - - -def _add_context_to_error(e, message): - if isinstance(e, BuiltinKeyError): - new_args = repr(e.args[0]) + message - e = KeyError(e, new_args) - e.args = tuple([new_args]) - elif isinstance(e, OSError): - # TODO: Investigate OSError, since they have so many args. - # Might be weird. - e.args = tuple([e.args[0] + message]) - return e - elif hasattr(e, 'args') and e.args: - e.args = tuple([e.args[0] + message]) - return e - - -x = sys.stdout - - -class SandboxTraceback: - """ - Class for reformatting tracebacks to have more pertinent information. 
- """ - - def __init__(self, exception, exc_info, full_traceback, - instructor_filename, line_offset, student_filename, - original_code_lines): - """ - Args: - exception (Exception): The exception that was raised. - exc_info (ExcInfo): The result of sys.exc_info() when the exception - was raised. - full_traceback (bool): Whether or not to provide the full traceback - or just the parts relevant to students. - instructor_filename (str): The name of the instructor file, which - can be used to avoid reporting instructor code in the - traceback. - """ - self.line_offset = line_offset - self.exception = exception - self.exc_info = exc_info - self.full_traceback = full_traceback - self.instructor_filename = instructor_filename - self.student_filename = student_filename - self.line_number = traceback.extract_tb(exc_info[2])[-1][1] - self.original_code_lines = original_code_lines - - def _clean_traceback_line(self, line): - return line.replace(', in ', '', 1) - - def format_exception(self, preamble=""): - if not self.exception: - return "" - if isinstance(self.exception, TimeoutError): - return str(self.exception) - cl, exc, tb = self.exc_info - while tb and self._is_relevant_tb_level(tb): - tb = tb.tb_next - length = self._count_relevant_tb_levels(tb) - tb_e = traceback.TracebackException(cl, self.exception, tb, limit=length, - capture_locals=False) - # print(list(), file=x) - for frame in tb_e.stack: - if frame.filename == os.path.basename(self.student_filename): - frame.lineno += self.line_offset - if frame.lineno - 1 < len(self.original_code_lines): - frame._line = self.original_code_lines[frame.lineno - 1] - else: - frame._line = "*line missing*" - lines = [self._clean_traceback_line(line) - for line in tb_e.format()] - lines[0] = "Traceback:\n" - return preamble + ''.join(lines) - - def _count_relevant_tb_levels(self, tb): - length = 0 - while tb and not self._is_relevant_tb_level(tb): - length += 1 - tb = tb.tb_next - return length - - def _is_relevant_tb_level(self, tb): - """ - Determines if the give part of the traceback is relevant to the user. - - Returns: - boolean: True means it is NOT relevant - """ - # Are in verbose mode? - if self.full_traceback: - return False - filename, a_, b_, _ = traceback.extract_tb(tb, limit=1)[0] - # Is the error in the student file? - if filename == self.student_filename: - return False - # Is the error in the instructor file? - if filename == self.instructor_filename: - return True - # Is the error in this test directory? - current_directory = os.path.dirname(os.path.realpath(__file__)) - if filename.startswith(current_directory): - return True - # Is the error related to a file in the parent directory? - parent_directory = os.path.dirname(current_directory) - # Currently we don't refer to this? - # Is the error in a local file? - if filename.startswith('.'): - return False - # Is the error in an absolute path? - if not os.path.isabs(filename): - return False - # Okay, it's not a student related file - return True diff --git a/src/lib/pedal/sandbox/messages.py b/src/lib/pedal/sandbox/messages.py deleted file mode 100644 index 350d8e43e3..0000000000 --- a/src/lib/pedal/sandbox/messages.py +++ /dev/null @@ -1,61 +0,0 @@ -# Skulpt has weird errors, and is missing some errors. Compatibility. 
-try: - ParseError -except NameError: - class ParseError(Exception): - pass -try: - SyntaxError -except NameError: - class SyntaxError(Exception): - pass -try: - ReferenceError -except NameError: - class ReferenceError(Exception): - pass -try: - EOFError -except NameError: - class EOFError(Exception): - pass -try: - MemoryError -except NameError: - class MemoryError(Exception): - pass -try: - OSError -except NameError: - class OSError(Exception): - pass -try: - TokenError -except NameError: - class TokenError(Exception): - pass -try: - TimeLimitError -except NameError: - class TimeLimitError(Exception): - pass - -EXTENDED_ERROR_EXPLANATION = { - ParseError: "A parse error means that Python does not understand the syntax on the line the error message points out. Common examples are forgetting commas beteween arguments or forgetting a : (colon) on a for statement.
    Suggestion: To fix a parse error you just need to look carefully at the line with the error and possibly the line before it. Make sure it conforms to all of Python's rules.", - TypeError: "Type errors most often occur when an expression tries to combine two objects with types that should not be combined. Like using + to add a number to a list instead of .append, or dividing a string by a number.
    Suggestion: To fix a type error you will most likely need to trace through your code and make sure the variables have the types you expect them to have.", - SyntaxError: "This message indicates that Python can't figure out the syntax of a particular statement. Some examples are assigning to a literal, or a function call.
    Suggestion: Check your assignment statements and make sure that the left hand side of the assignment is a variable, not a literal (e.g., 7 or \"hello\") or a function.", - NameError: "A name error almost always means that you have used a variable before it has a value. Often this may be a simple typo, so check the spelling carefully.
    Suggestion: Check the right hand side of assignment statements and your function calls, this is the most likely place for a NameError to be found. It really helps to step through your code, one line at a time, mentally keeping track of your variables.", - ValueError: "A ValueError most often occurs when you pass a parameter to a built-in function, and the function is expecting one type and you pass something different. For instance, if you try to convert a non-numeric string to an int, you will get a ValueError:
      int(\"Corgi\") # ValueError: invalid literal for int() with base 10

    Suggestion: The error message gives you a pretty good hint about the name of the function as well as the value that is incorrect. Look at the error message closely and then trace back to the variable containing the problematic value.", - AttributeError: "This happens when you try to do SOMETHING.WHATEVER and either SOMETHING wasn't declared or WHATEVER isn't an attribute of SOMETHING. This error message is telling you that the object on the left hand side of the dot does not have the attribute or method on the right hand side.
    Suggestion: You were probably trying to either get access to some data (weather.get) or append (a_list.append). If it's the first one, you should make sure the module is imported and that you are calling its function correctly. If it's the second one, you should make sure you spelled \"append\" right and that you are using a variable with a list for a value.", - TokenError: "Most of the time this error indicates that you have forgotten a right parenthesis or have forgotten to close a pair of quotes.
    Suggestion: Check each line of your program and make sure that your parentheses are balanced.", - IndexError: "This message means that you are trying to index past the end of a string or a list. For example, your list has 3 things in it and you try to access the item at position 5.
    Suggestion: Remember that the first item in a list or string is at index position 0; quite often this message comes about because you are off by one. Remember that in a list of length 3 the last legal index is 2.
    favorite_colors = [\"red\", \"blue\", \"green\"]\nfavorite_colors[2] # prints green favorite_color[3] # raises an IndexError
    ", - ImportError: "This error message indicates that you are trying to import a module that does not exist, or is not in the same directory as your python script.
    Suggestion: One problem may simply be that you have a typo - remember, you must not capitalize the module name. Another common problem is that you have placed the module in a different directory. Finally, if you're using a dataset module, then it might not be imported. Use the \"Import Datasets\" button below!", - ReferenceError: "This is a really hard error to get, so I'm not entirely sure what you did.
    Suggestion: Bring this code to the instructor. ", - ZeroDivisionError: "This tells you that you are trying to divide by 0. Typically this is because the value of the variable in the denominator of a division expression has the value 0.
    Suggestion: Are you sure you are dividing by the right variable? Are you sure that that variable has the value you expect - is it possible that you counted the number of elements in an empty list, for instance?", - IndentationError: "This error occurs when you have not indented your code properly. This is most likely to happen as part of an if, for, while or def statement.
    Suggestion: Check your if, def, for, and while statements to be sure the lines are properly indented beneath them (seriously, this happens ALL the time). Another source of this error comes from copying and pasting code where you have accidentally left some bits of code lying around that don't belong there anymore. Finally, a very sinister but unlikely possibility is that you have some tab characters in your code, which look identical to four spaces. Never, ever use tabs, and carefully check code from the internet to make sure it doesn't have tabs.", - EOFError: "If you are using input() or raw_input() commands, then this error happens when they don't get the right ending.
    Suggestion: It's hard to protect against users. However, if you're using input(), you might be able to use raw_input() instead to avoid this problem. ", - IOError: "This is a very easy error to get. The most common reason is that you were trying to open a file and it wasn't in the right place.
    Suggestion: Make sure that the file is in the right place - print out the file path, and then check that it's definitely on your computer at that location. If you need help doing file processing, you should probably check with an instructor.", - KeyError: "A dictionary has a bunch of keys that you can use to get data. This error is caused by you trying to refer to a key that does not exist.
    Suggestion: The most common reason you get this exception is that you have a typo in your dictionary access. Check your spelling. Also double check that the key definitely exists.", - MemoryError: "Somehow, you have run out of memory.
    Suggestion: Make sure you are filtering your dataset! Alternatively, bring your code to an instructor.", - OSError: "It's hard to say what an OSError is without deep checking. Many things can cause it.
    Suggestion: Bring your code to an instructor. ", - TimeoutError: "A TimeLimit error means that BlockPy wasn't able to process your program fast enough. Typically, this means that you're iterating through too many elements."} diff --git a/src/lib/pedal/sandbox/mocked.py b/src/lib/pedal/sandbox/mocked.py deleted file mode 100644 index da841dd62b..0000000000 --- a/src/lib/pedal/sandbox/mocked.py +++ /dev/null @@ -1,336 +0,0 @@ -""" -Mocked functions that can be used to prevent malicious or accidental `eval` -behavior. -""" -import re -import types - -from pedal.sandbox.exceptions import (SandboxNoMoreInputsException, - SandboxPreventModule) - - -def _disabled_compile(source, filename, mode, flags=0, dont_inherit=False): - """ - A version of the built-in `compile` method that fails with a runtime - error. - """ - raise RuntimeError("You are not allowed to call 'compile'.") - - -def _disabled_eval(object, globals=globals(), locals=None): - """ - A version of the built-in `eval` method that fails with a runtime - error. - """ - raise RuntimeError("You are not allowed to call 'eval'.") - - -# ------------------------------------------------------------- - - -def _disabled_exec(object, globals=globals(), locals=None): - """ - A version of the built-in `exec` method that fails with a runtime - error. - """ - raise RuntimeError("You are not allowed to call 'exec'.") - - -# ------------------------------------------------------------- - - -def _disabled_globals(): - """ - A version of the built-in `globals` method that fails with a runtime - error. - """ - raise RuntimeError("You are not allowed to call 'globals'.") - - -class FunctionNotAllowed(Exception): - pass - - -def disabled_builtin(name): - def _disabled_version(*args, **kwargs): - raise FunctionNotAllowed("You are not allowed to call '{}'.".format(name)) - - return _disabled_version - - -_OPEN_FORBIDDEN_NAMES = re.compile(r"(^[./])|(\.py$)") -_OPEN_FORBIDDEN_MODES = re.compile(r"[wa+]") - - -# TODO: Turn this into a function that lets us more elegantly specify valid and -# invalid filenames/paths - - -def _restricted_open(name, mode='r', buffering=-1): - if _OPEN_FORBIDDEN_NAMES.search(name): - raise RuntimeError("The filename you passed to 'open' is restricted.") - elif _OPEN_FORBIDDEN_MODES.search(mode): - raise RuntimeError("You are not allowed to 'open' files for writing.") - else: - return _original_builtins['open'](name, mode, buffering) - - -# TODO: Allow this to be flexible - - -def _restricted_import(name, globals=None, locals=None, fromlist=(), level=0): - if name == 'pedal' or name.startswith('pedal.'): - raise RuntimeError("You cannot import pedal!") - return _original_builtins['__import__'](name, globals, locals, fromlist, level) - - -try: - __builtins__ -except NameError: - _default_builtins = {'globals': globals, - 'locals': locals, - 'open': open, - 'input': input, - '__import__': __import__} -else: - if isinstance(__builtins__, types.ModuleType): - _default_builtins = __builtins__.__dict__ - else: - _default_builtins = __builtins__ - -_original_builtins = { - 'globals': _default_builtins['globals'], - 'locals': _default_builtins['locals'], - 'open': _default_builtins['open'], - 'input': _default_builtins['input'], - 'exec': _default_builtins.get('exec', _disabled_exec), - 'eval': _default_builtins.get('eval', _disabled_eval), - 'compile': _default_builtins.get('compile', _disabled_compile), - '__import__': _default_builtins['__import__'] -} - - -def make_inputs(input_list, repeat=None): - """ - Helper function for 
creating mock user input. - - Params: - input_list (list of str): The list of inputs to be returned - Returns: - function (str=>str): The mock input function that is returned, which - will return the next element of input_list each - time it is called. - """ - generator = iter(input_list) - - def mock_input(prompt=''): - print(prompt) - try: - return next(generator) - except StopIteration as SI: - if repeat is None: - # TODO: Make this a custom exception - raise SandboxNoMoreInputsException("User had no more input to give.") - else: - return repeat - - return mock_input - - -_sys_modules = {} - - -def _override_builtins(namespace, custom_builtins): - """ - Add the custom builtins to the `namespace` (and the original `__builtins__`) - suitable for `exec`. - """ - # Obtain the dictionary of built-in methods, which might not exist in - # some python versions (e.g., Skulpt) - - # Create a shallow copy of the dictionary of built-in methods. Then, - # we'll take specific ones that are unsafe and replace them. - namespace["__builtins__"] = _default_builtins.copy() - for name, function in custom_builtins.items(): - namespace["__builtins__"][name] = function - - -def create_module(module_name): - submodule_names = module_name.split(".") - modules = {} - root = types.ModuleType(submodule_names[0]) - modules[submodule_names[0]] = root - reconstructed_path = submodule_names[0] - for submodule_name in submodule_names[1:]: - reconstructed_path += "." + submodule_name - new_submodule = types.ModuleType(reconstructed_path) - setattr(root, submodule_name, new_submodule) - modules[reconstructed_path] = new_submodule - return root, modules - - -class MockModule: - def _generate_patches(self): - return {k: v for k, v in vars(self).items() - if not k.startswith('_')} - - def _add_to_module(self, module): - for name, value in self._generate_patches().items(): - setattr(module, name, value) - - -class BlockedModule(MockModule): - MODULE_NAME = "this module" - - def _generate_patches(self): - return {'__getattr__': self.prevent_module} - - def prevent_module(self, **kwargs): - raise SandboxPreventModule("You cannot import {module_name} from student code.".format( - module_name=self.MODULE_NAME - )) - - -class MockPedal(BlockedModule): - MODULE_NAME = "pedal" - - -class MockTurtle(MockModule): - """ - Mock Turtle Module that can be used to trace turtle calls. - - Attributes: - calls (list of dict): The traced list of calls - # TODO: it'd be awesome to have a way to construct a representation - # of the drawing result that we could autograde! 
- """ - - def __init__(self): - super().__init__() - - def _reset_turtles(self): - self.calls = [] - - def __repr__(self): - return repr(self.plots) - - ''' - def _generate_patches(self): - def dummy(**kwargs): - pass - - return dict(Canvas=dummy, Pen=dummy, RawPen=dummy, RawTurtle=dummy, Screen=dummy, ScrolledCanvas=dummy, - Shape=dummy, TK=dummy, TNavigator=dummy, TPen=dummy, Tbuffer=dummy, Terminator=dummy, - Turtle=dummy, TurtleGraphicsError=dummy, TurtleScreen=dummy, TurtleScreenBase=dummy, Vec2D=dummy, - addshape=dummy, back=dummy, backward=dummy, begin_fill=dummy, begin_poly=dummy, bgcolor=dummy, - bgpic=dummy, bk=dummy, bye=dummy, circle=dummy, clear=dummy, clearscreen=dummy, clearstamp=dummy, - clearstamps=dummy, clone=dummy, color=dummy, colormode=dummy, config_dict=dummy, deepcopy=dummy, - degrees=dummy, delay=dummy, distance=dummy, done=dummy, dot=dummy, down=dummy, end_fill=dummy, - end_poly=dummy, exitonclick=dummy, fd=dummy, fillcolor=dummy, filling=dummy, forward=dummy, - get_poly=dummy, get_shapepoly=dummy, getcanvas=dummy, getmethparlist=dummy, getpen=dummy, - getscreen=dummy, getshapes=dummy, getturtle=dummy, goto=dummy, heading=dummy, hideturtle=dummy, - home=dummy, ht=dummy, inspect=dummy, isdown=dummy, isfile=dummy, isvisible=dummy, join=dummy, - left=dummy, listen=dummy, lt=dummy, mainloop=dummy, math=dummy, mode=dummy, numinput=dummy, - onclick=dummy, ondrag=dummy, onkey=dummy, onkeypress=dummy, onkeyrelease=dummy, onrelease=dummy, - onscreenclick=dummy, ontimer=dummy, pd=dummy, pen=dummy, pencolor=dummy, pendown=dummy, - pensize=dummy, penup=dummy, pos=dummy, position=dummy, pu=dummy, radians=dummy, - read_docstrings=dummy, readconfig=dummy, register_shape=dummy, reset=dummy, resetscreen=dummy, - resizemode=dummy, right=dummy, rt=dummy, screensize=dummy, seth=dummy, setheading=dummy, - setpos=dummy, setposition=dummy, settiltangle=dummy, setundobuffer=dummy, setup=dummy, - setworldcoordinates=dummy, setx=dummy, sety=dummy, shape=dummy, shapesize=dummy, - shapetransform=dummy, shearfactor=dummy, showturtle=dummy, simpledialog=dummy, speed=dummy, - split=dummy, st=dummy, stamp=dummy, sys=dummy, textinput=dummy, tilt=dummy, tiltangle=dummy, - time=dummy, title=dummy, towards=dummy, tracer=dummy, turtles=dummy, turtlesize=dummy, types=dummy, - undo=dummy, undobufferentries=dummy, up=dummy, update=dummy, width=dummy, window_height=dummy, - window_width=dummy, write=dummy, write_docstringdict=dummy, xcor=dummy, ycor=dummy) - ''' - - -class MockPlt(MockModule): - """ - Mock MatPlotLib library that can be used to capture plot data. - - Attributes: - plots (list of dict): The internal list of plot dictionaries. 
- """ - - def __init__(self): - super().__init__() - self._reset_plots() - - def show(self, **kwargs): - self.plots.append(self.active_plot) - self._reset_plot() - - def unshown_plots(self): - return self.active_plot['data'] - - def __repr__(self): - return repr(self.plots) - - def __str__(self): - return str(self.plots) - - def _reset_plots(self): - self.plots = [] - self._reset_plot() - - def _reset_plot(self): - self.active_plot = {'data': [], - 'xlabel': None, 'ylabel': None, - 'title': None, 'legend': False} - - def hist(self, data, **kwargs): - label = kwargs.get('label', None) - self.active_plot['data'].append({'type': 'hist', 'values': data, - 'label': label}) - - def plot(self, xs, ys=None, **kwargs): - label = kwargs.get('label', None) - if ys is None: - self.active_plot['data'].append({'type': 'line', - 'x': list(range(len(xs))), - 'y': xs, 'label': label}) - else: - self.active_plot['data'].append({'type': 'line', 'x': xs, - 'y': ys, 'label': label}) - - def scatter(self, xs, ys, **kwargs): - label = kwargs.get('label', None) - self.active_plot['data'].append({'type': 'scatter', 'x': xs, - 'y': ys, 'label': label}) - - def xlabel(self, label, **kwargs): - self.active_plot['xlabel'] = label - - def title(self, label, **kwargs): - self.active_plot['title'] = label - - def suptitle(self, label, **kwargs): - self.title(label, **kwargs) - - def ylabel(self, label, **kwargs): - self.active_plot['ylabel'] = label - - def legend(self, **kwargs): - self.active_plot['legend'] = True - - def _generate_patches(self): - def dummy(**kwargs): - pass - - return dict(hist=self.hist, plot=self.plot, - scatter=self.scatter, show=self.show, - xlabel=self.xlabel, ylabel=self.ylabel, - title=self.title, legend=self.legend, - xticks=dummy, yticks=dummy, - autoscale=dummy, axhline=dummy, - axhspan=dummy, axvline=dummy, - axvspan=dummy, clf=dummy, - cla=dummy, close=dummy, - figlegend=dummy, figimage=dummy, - suptitle=self.suptitle, text=dummy, - tick_params=dummy, ticklabel_format=dummy, - tight_layout=dummy, xkcd=dummy, - xlim=dummy, ylim=dummy, - xscale=dummy, yscale=dummy) diff --git a/src/lib/pedal/sandbox/result.py b/src/lib/pedal/sandbox/result.py deleted file mode 100644 index e7d269d9f3..0000000000 --- a/src/lib/pedal/sandbox/result.py +++ /dev/null @@ -1,369 +0,0 @@ -class SandboxResult: - """ - Proxy class for wrapping results from executing student code. Attempts - to perfectly emulate the underlying data value, so that users will never - realize they have a proxy. The advantage is that special information is - available in the corresponding Sandbox about this result that can give - more context. - - Attributes: - value (any): The actual data stored in this class that we are proxying. - If the underlying proxy object has a field called `value`, then - you can use either `_actual_value` to access the proxied object. - _actual_call_id (int): The call that was used to generate this result. - _actual_sandbox (Sandbox): The sandbox that was used to generate this - result. If None, then the sandbox was lost. - - """ - ASSIGNABLE_ATTRS = ['value', '_actual_call_id', '_actual_sandbox', - '_clone_this_result'] - - def __init__(self, value, call_id=None, sandbox=None): - """ - Args: - value (any): Literally any type of data. - call_id (int): The unique call ID that generated this result. If - None, then the SandboxResult was generated by manipulating an earlier - result. - TODO: We could actually remember the operations applied to this - instance and use them to reconstruct the transformations... 
- sandbox (Sandbox): The sandbox that was used to generate this - result. If None, then the sandbox was lost. - """ - self.value = value - self._actual_call_id = call_id - self._actual_sandbox = sandbox - - def __getattribute__(self, name): - """ - Get the attribute with the given `name`. This allows us to pass - most attributes along to the underlying `value`, while still - maintaining access to the proxy's attributes. - """ - v = object.__getattribute__(self, "value") - if name == "__class__": - return v.__class__ - elif name == "__actual_class__": - return object.__getattribute__(self, "__class__") - elif name == "_actual_value": - return v - elif name in SandboxResult.ASSIGNABLE_ATTRS: - return object.__getattribute__(self, name) - elif name == "value" and not hasattr(v, "value"): - return v - else: - return SandboxResult(object.__getattribute__(v, name), - object.__getattribute__(self, "_actual_call_id"), - object.__getattribute__(self, "_actual_sandbox")) - - def __setattr__(self, name, value): - if name in SandboxResult.ASSIGNABLE_ATTRS: - object.__setattr__(self, name, value) - else: - setattr(self.value, name, value) - - def __delattr__(self, name): - if name in SandboxResult.ASSIGNABLE_ATTRS: - object.__delattr__(self, name, value) - else: - delattr(self.value, name, value) - - def _clone_this_result(self, new_value): - """ - Create a new SandboxResult based on this current one. Copies over the - `call_id` and `sandbox`. - - Args: - new_value (any): The new value to be proxying. - Returns: - SandboxResult - """ - return SandboxResult(new_value, - call_id=self._actual_call_id, - sandbox=self._actual_sandbox) - - def __repr__(self): - """ - Returns the representation of the proxied object. - - Returns: - str: The `repr` of the proxied object. - """ - return repr(self.value) - - def __str__(self): - """ - Returns the string representation of the proxied object. - - Returns: - str: The `str` of the proxied object. - """ - return str(self.value) - - def __bytes__(self): - return bytes(self.value) - - def __format__(self, format_spec): - return format(self.value, format_spec) - - def __call__(self, *args): - """ - Returns the result of calling the proxied object with the args. - - Returns: - SandboxResult: A proxy of the Sandbox object. - """ - return self._clone_this_result(self.value(*args)) - - def __hash__(self): - return hash(self.value) - - def __bool__(self): - return bool(self.value) - - def __dir__(self): - return dir(self.value) - - def __instancecheck__(self, instance): - return isinstance(self.value, instance) - - def __subclasscheck__(self, subclass): - return issubclass(self.value, subclass) - - def __len__(self): - ''' - Fun fact: cpython DEMANDS that __len__ return an integer. Not something - that looks like an integer, but a true, honest-to-god integer that - can fit into a slot. - https://stackoverflow.com/questions/42521449/how-does-python-ensure-the-return-value-of-len-is-an-integer-when-len-is-cal - ''' - return len(self.value) - - def __getitem__(self, key): - return self._clone_this_result(self.value[key]) - - def __setitem__(self, key, value): - self.value[key] = value - - def __delitem__(self, key): - del self.value[key] - - def __missing__(self, key): - return self.value.__missing__(key) - - def __iter__(self): - return iter(self.value) - - def __reversed__(self): - return reversed(self.value) - - def __contains__(self, item): - return self.value.__contains__(item) - - def __eq__(self, other): - """ - Test if the proxied object is equal to the given `other`. 
- - Args: - other (any): The other object. - - Returns: - bool or any: Returns whatever the proxy object's __eq__ returns. - """ - if isinstance(other, SandboxResult): - return self.value == other.value - return self.value == other - - def __lt__(self, other): - if isinstance(other, SandboxResult): - return self.value < other.value - return self.value < other - - def __le__(self, other): - if isinstance(other, SandboxResult): - return self.value <= other.value - return self.value <= other - - def __gt__(self, other): - if isinstance(other, SandboxResult): - return self.value > other.value - return self.value > other - - def __ge__(self, other): - if isinstance(other, SandboxResult): - return self.value >= other.value - return self.value >= other - - def __ne__(self, other): - if isinstance(other, SandboxResult): - return self.value != other.value - return self.value != other - - ## Numeric Operations - - def __add__(self, other): - if isinstance(other, SandboxResult): - return self._clone_this_result(self.value + other.value) - return self._clone_this_result(self.value + other) - - def __sub__(self, other): - if isinstance(other, SandboxResult): - return self._clone_this_result(self.value - other.value) - return self._clone_this_result(self.value - other) - - def __mul__(self, other): - if isinstance(other, SandboxResult): - return self._clone_this_result(self.value * other.value) - return self._clone_this_result(self.value * other) - - def __matmul__(self, other): - if isinstance(other, SandboxResult): - return self._clone_this_result(self.value.__matmul__(other.value)) - return self._clone_this_result(self.value.__matmul__(other)) - - def __truediv__(self, other): - if isinstance(other, SandboxResult): - return self._clone_this_result(self.value.__truediv__(other.value)) - return self._clone_this_result(self.value.__truediv__(other)) - - def __floordiv__(self, other): - if isinstance(other, SandboxResult): - return self._clone_this_result(self.value.__floordiv__(other.value)) - return self._clone_this_result(self.value.__floordiv__(other)) - - def __mod__(self, other): - if isinstance(other, SandboxResult): - return self._clone_this_result(self.value.__mod__(other.value)) - return self._clone_this_result(self.value.__mod__(other)) - - def __divmod__(self, other): - if isinstance(other, SandboxResult): - return self._clone_this_result(self.value.__divmod__(other.value)) - return self._clone_this_result(self.value.__divmod__(other)) - - def __pow__(self, other, *modulo): - if isinstance(other, SandboxResult): - return self._clone_this_result(self.value.__pow__(other.value, *modulo)) - return self._clone_this_result(self.value.__pow__(other, *modulo)) - - def __lshift__(self, other): - if isinstance(other, SandboxResult): - return self._clone_this_result(self.value.__lshift__(other.value)) - return self._clone_this_result(self.value.__lshift__(other)) - - def __rshift__(self, other): - if isinstance(other, SandboxResult): - return self._clone_this_result(self.value.__rshift__(other.value)) - return self._clone_this_result(self.value.__rshift__(other)) - - def __and__(self, other): - if isinstance(other, SandboxResult): - return self._clone_this_result(self.value.__and__(other.value)) - return self._clone_this_result(self.value.__and__(other)) - - def __xor__(self, other): - if isinstance(other, SandboxResult): - return self._clone_this_result(self.value.__xor__(other.value)) - return self._clone_this_result(self.value.__xor__(other)) - - def __or__(self, other): - if isinstance(other, 
SandboxResult): - return self._clone_this_result(self.value.__or__(other.value)) - return self._clone_this_result(self.value.__or__(other)) - - def __radd__(self, other): - if isinstance(self.value, str): - return self._clone_this_result(self.value.__add__(other)) - return self._clone_this_result(self.value.__radd__(other)) - - def __rsub__(self, other): - return self._clone_this_result(self.value.__rsub__(other)) - - def __rmul__(self, other): - return self._clone_this_result(self.value.__rmul__(other)) - - def __rmatmul__(self, other): - return self._clone_this_result(self.value.__rmatmul__(other)) - - def __rtruediv__(self, other): - return self._clone_this_result(self.value.__rtruediv__(other)) - - def __rfloordiv__(self, other): - return self._clone_this_result(self.value.__rfloordiv__(other)) - - def __rmod__(self, other): - return self._clone_this_result(self.value.__rmod__(other)) - - def __rdivmod__(self, other): - return self._clone_this_result(self.value.__rdivmod__(other)) - - def __rpow__(self, other): - return self._clone_this_result(self.value.__rpow__(other)) - - def __rlshift__(self, other): - return self._clone_this_result(self.value.__rlshift__(other)) - - def __rand__(self, other): - return self._clone_this_result(self.value.__rand__(other)) - - def __rxor__(self, other): - return self._clone_this_result(self.value.__rxor__(other)) - - def __ror__(self, other): - return self._clone_this_result(self.value.__ror__(other)) - - ## TODO: __iadd__ and other in-place assignment operators? - - def __neg__(self): - return self._clone_this_result(self.value.__neg__()) - - def __pos__(self): - return self._clone_this_result(self.value.__pos__()) - - def __abs__(self): - return self._clone_this_result(self.value.__abs__()) - - def __invert__(self): - return self._clone_this_result(self.value.__invert__()) - - def __complex__(self): - return self._clone_this_result(self.value.__complex__()) - - def __int__(self): - return self._clone_this_result(self.value.__int__()) - - def __float__(self): - return self._clone_this_result(self.value.__float__()) - - def __round__(self, *ndigits): - return self._clone_this_result(self.value.__round__(*ndigits)) - - def __trunc__(self): - return self._clone_this_result(self.value.__trunc__()) - - def __floor__(self): - return self._clone_this_result(self.value.__floor__()) - - def __ceil__(self): - return self._clone_this_result(self.value.__ceil__()) - - def __enter__(self): - return self.value.__enter__() - - def __exit__(self, exc_type, exc_value, traceback): - return self.value.__exit__(exc_type, exc_value, traceback) - - def __await__(self): - return self.value.__await__() - - def __aiter__(self): - return self.value.__aiter__() - - def __anext__(self): - return self.value.__anext__() - - def __aenter__(self): - return self.value.__aenter__() - - def __aexit__(self, exc_type, exc_value, traceback): - return self.value.__aexit__(exc_type, exc_value, traceback) diff --git a/src/lib/pedal/sandbox/sandbox.py b/src/lib/pedal/sandbox/sandbox.py deleted file mode 100644 index 156890bbe3..0000000000 --- a/src/lib/pedal/sandbox/sandbox.py +++ /dev/null @@ -1,726 +0,0 @@ -from pprint import pprint -import ast -import re -import sys -import io -import os -import string -from unittest.mock import patch - -from pedal.report import MAIN_REPORT -from pedal.sandbox import mocked -from pedal.sandbox.exceptions import (SandboxTraceback, SandboxHasNoFunction, - SandboxStudentCodeException, - SandboxHasNoVariable, _add_context_to_error) -from pedal.sandbox.timeout 
import timeout -from pedal.sandbox.messages import EXTENDED_ERROR_EXPLANATION -from pedal.sandbox.result import SandboxResult -from pedal.sandbox.tracer import (SandboxCallTracer, SandboxCoverageTracer, - SandboxBasicTracer) - - -def _dict_extends(d1, d2): - """ - Helper function to create a new dictionary with the contents of the two - given dictionaries. Does not modify either dictionary, and the values are - copied shallowly. If there are repeats, the second dictionary wins ties. - - The function is written to ensure Skulpt compatibility. - - Args: - d1 (dict): The first dictionary - d2 (dict): The second dictionary - Returns: - dict: The new dictionary - """ - d3 = {} - for key, value in d1.items(): - d3[key] = value - for key, value in d2.items(): - d3[key] = value - return d3 - - -class SandboxVariable: - def __init__(self, name, value): - self.name = name - self.value = value - - -class DataSandbox: - """ - Simplistic Mixin class that contains the functions for accessing a - self-contained student data namespace. - """ - - def __init__(self): - super().__init__() - self.data = {} - - def get_names_by_type(self, type, exclude_builtins=True): - result = [] - for name, value in self.data.items(): - if isinstance(value, type): - if exclude_builtins and name.startswith('__'): - continue - result.append(name) - return result - - def get_values_by_type(self, type, exclude_builtins=True): - names = self.get_names_by_type(type, exclude_builtins) - return [self.data[name] for name in names] - - def get_variables_by_type(self, type, exclude_builtins=True): - names = self.get_names_by_type(type, exclude_builtins) - return [(name, self.data[name]) for name in names] - - @property - def functions(self): - """ - Retrieve a list of all the callable names in the students' namespace. - In other words, get a list of all the functions the student defined. - - Returns: - list of callables - """ - return {k: v for k, v in self.data.items() if callable(v)} - - @property - def var(self): - return {k: SandboxVariable(k, v) for k, v in self.data.items()} - - def __repr__(self): - return "" - - -class Sandbox(DataSandbox): - """ - - The Sandbox is a container that can safely execute student code and store - the result. - - Attributes: - data: The namespace produced by the students' code. This is basically - a dictionary mapping valid python names to their values. - raw_output (str): The exact literal results of all the `print` calls - made so far, including the "\n" characters. - output (list of str): The current lines of output, broken up by - distinct print calls (not "\n" characters). Note that this will - not have any "\n" characters unless you explicitly printed them. - output_contexts (dict[str:list[str]]): The output for each call context. - call_id (int): The current call_id of the most recent call. Is - initially 0, indicating the original sandbox creation. - modules: A dictionary of the mocked modules (accessible by their - imported names). - context: A list of strings representing the code previously run through - this sandbox via .call. - contextualize (bool): Whether or not to contextualize stack frames. - """ - - CONTEXT_MESSAGE = ( - "\n\nThe error above occurred when I ran:
    \n
    {context}
    " - ) - FILE_CONTEXT_MESSAGE = ( - "\n\nThe error above occurred when I ran your file: {filename}" - ) - INPUT_CONTEXT_MESSAGE = ( - "And entered the inputs:\n```\n{inputs}\n```" - ) - TRACER_STYLES = { - 'coverage': SandboxCoverageTracer, - 'calls': SandboxCallTracer, - 'none': SandboxBasicTracer, - } - - def __init__(self, initial_data=None, - initial_raw_output=None, - initial_exception=None, - modules=None, full_traceback=False, - tracer_style='none', - threaded=False, report=None, - context=None, result_proxy=SandboxResult, - instructor_filename="instructor_tests.py", - allowed_functions=None): - """ - Args: - initial_data (dict[str:Any]): An initial namespace to provide when - executing the students' code. The keys must be strings and - should be valid Python names. Defaults to None, which will be - an empty namespace. - initial_exception (Exception): An initial exception to load into - the Sandbox. Usually you will let the students' code generate - its own exceptions, but if you're constructing a sandbox you - might need to specify one. Defaults to None. - modules: A dictionary of strings (valid python package names) that - map to either the value True (if we provide a default - implementation) or a user-created MockedModule. By default, - we mock out the following modules: - * matplotlib - * pedal - context (False, None, or list[str]): How to contextualize calls by - default in this Sandbox. False means no contextualization. - None (default) means contextualize automatically. If you give - a list[str], then it assumes you want to contextualize - automatically but starting off with the given strings. - initial_raw_output (str): The initial printed output for the - sandbox. Usually defaults to None to indicate a blank printed - area. - instructor_filename (str): The filename to display in tracebacks, - when executing student code in instructor tests. Although you - can specify something else, defaults to "instructor_tests.py". 
- """ - super().__init__() - if initial_data is None: - initial_data = {} - self.data = initial_data - - # Context - self.call_id = 0 - self.target_contexts = {self.call_id: []} - self.call_contexts = {self.call_id: []} - self.input_contexts = {self.call_id: []} - self.context = context - self.keep_context = False - # Update outputs - self.set_output(initial_raw_output) - # filename - self.instructor_filename = instructor_filename - # Temporary data - self._temporaries = set() - self._backups = {} - # Exception - self.exception = initial_exception - self.exception_position = None - self.exception_formatted = None - self.report_exceptions_mode = False - self.raise_exceptions_mode = False - # Input - self.set_input(None) - self._input_tracker = self._track_inputs() - # Modules - if modules is None: - modules = {'matplotlib': True, - 'pedal': mocked.MockPedal() - } - self.mocked_modules = {} - self.modules = {} - self.add_mocks(modules) - self.mocked_functions = { - 'compile': mocked._disabled_compile, - 'eval': mocked._disabled_eval, - 'exec': mocked._disabled_exec, - 'globals': mocked._disabled_globals, - 'open': mocked._restricted_open, - '__import__': mocked._restricted_import, - } - if allowed_functions is not None: - for function_name in allowed_functions: - if function_name in self.mocked_functions: - del self.mocked_functions[function_name] - # Patching - self._current_patches = [] - # Settings - self.full_traceback = full_traceback - self.MAXIMUM_VALUE_LENGTH = 120 - # Tracer Styles - self.tracer_style = tracer_style - # Proxying results - self.result_proxy = result_proxy - # report - if report is None: - report = MAIN_REPORT - self.report = report - # Threading - self.threaded = threaded - self.allowed_time = 3 - - def _set_tracer_style(self, tracer_style): - self._tracer_style = tracer_style.lower() - self.trace = self.TRACER_STYLES[tracer_style.lower()]() - - def _get_tracer_style(self): - return self._tracer_style - - tracer_style = property(_get_tracer_style, _set_tracer_style) - - def add_mocks(self, modules): - """ - :param modules: Keyword listing of modules and their contents - (MockedModules) or True (if its one that we have a - default implementation for). - :type modules: dict - """ - for module_name, module_data in modules.items(): - self._add_mock(module_name, module_data) - - def _add_mock(self, module_name, module_data): - # MatPlotLib's PyPlot - if module_name == 'matplotlib': - matplotlib, modules = mocked.create_module('matplotlib.pyplot') - self.mocked_modules.update(modules) - if module_data is True: - mock_plt = mocked.MockPlt() - mock_plt._add_to_module(matplotlib.pyplot) - self.modules['matplotlib.pyplot'] = mock_plt - else: - module_data._add_to_module(matplotlib.pyplot) - else: - root, modules = mocked.create_module(module_name) - self.mocked_modules.update(modules) - self.modules[module_name] = module_data - module_data._add_to_module(root) - - def set_output(self, raw_output): - """ - Change the current printed output for the sandbox to the given value. - If None is given, then clears all the given output (empty list for - `output` and empty string for `raw_output`). - - Args: - raw_output (str): The new raw_output for the sandbox. To compute - the `output` attribute, the system splits and rstrips at - newlines. 
- """ - if raw_output is None: - self.raw_output = "" - self.output = [] - self.output_contexts = {self.call_id: list(self.output)} - else: - self.raw_output = raw_output - lines = raw_output.rstrip().split("\n") - self.output = [line.rstrip() for line in lines] - self.output_contexts[self.call_id] = list(self.output) - - def append_output(self, raw_output): - """ - Adds the string of `raw_output` to the current `raw_output` attribute. - The added string will be split on newlines and rstripped to append - to the `output` attribute. - - Args: - raw_output (str): The new raw_output for the sandbox. To compute - the `output` attribute, the system splits and rstrips at - newlines. - """ - self.raw_output += raw_output - lines = raw_output.rstrip().split("\n") - lines = [line.rstrip() for line in lines] - if self.raw_output: - self.output.extend(lines) - self.output_contexts[self.call_id].extend(lines) - - def set_input(self, inputs, clear=True): - """ - Queues the given value as the next arguments to the `input` function. - """ - if inputs is None: - self.inputs = [] - if clear: - self.inputs.clear() - if isinstance(inputs, str): - self.inputs.append(inputs) - elif isinstance(inputs, (list, tuple)): - self.inputs.extend(inputs) - elif inputs is not None: - # TODO: intelligently handle custom generator - self.inputs = inputs - - def _track_inputs(self): - """ - Wraps an input function with a tracker. - """ - - def _input_tracker(*args, **kwargs): - if args: - prompt = args[0] - else: - prompt = "" - print(prompt) - if self.inputs: - value_entered = self.inputs.pop(0) - else: - # TODO: Make this smarter, more elegant in choosing IF we should repeat 0 - value_entered = '0' - self.input_contexts[self.call_id].append(value_entered) - return value_entered - - return _input_tracker - - def _purge_temporaries(self): - """ - Delete any variables in the namespace that have been made as - temporaries. This happens automatically after you execute code. - """ - for key in self._temporaries: - if key in self._backups: - self.data[key] = self.backups[key] - else: - del self.data[key] - self._temporaries = set() - - def _is_long_value(self, value): - return len(repr(value)) > 25 - - def _make_temporary(self, category, name, value, context): - """ - Create a temporary variable in the namespace for the given - category/name. This is used to load arguments into the namespace to - be used in function calls. Temporaries are only created if the value's - repr length is too long, as defined by _is_long_value. - - Args: - category (str): A categorical division for the temporary variable - that can help keep the namespace distinctive - there are a - few different kinds of categories (e.g., for regular positional - args, star args, kwargs). - name (str): A distinctive ID for this variable. The final variable - name will be "_temporary__". - value: The value for this argument. - Returns: - str: The new name for the temporary variable. 
- """ - if isinstance(value, SandboxVariable): - return value.name - if not self._is_long_value(value): - return repr(value) - key = '_temporary_{}_{}'.format(category, name) - if key in self.data: - self._backups[key] = self.data[key] - self._temporaries.add(key) - self.data[key] = value - if context is None: - self.call_contexts[self.call_id].append("{} = {}".format(key, value)) - return key - - def run_file(self, filename, as_filename=None, modules=None, inputs=None, - threaded=None, context=None, report_exceptions=None, - raise_exceptions=None): - """ - Load the given filename and execute it within the current namespace. - - Args: - context (False, None, or list[str]): The context to give any - exceptions. If None, then the recorded context will be used. If - a string, tracebacks will be shown with the given context. If - False, no context will be given. - """ - if as_filename is None: - as_filename = filename - with open(filename, 'r') as code_file: - code = code_file.read() + '\n' - self.run(code, as_filename, modules, inputs, threaded, - context, report_exceptions, raise_exceptions) - - def list(self, *args): - pass - - def call(self, function, *args, **kwargs): - """ - Args: - function (str): The name of the function to call that was defined - by the user. - as_filename (str): The filename to use when calling this function. - Defaults to the instructor filename, since you are calling - code on the student's behalf. - target (str): The new variable in the namespace to assign to. By - default this will be "_". If you use None, then no variable - will be assigned to. Note that this could overwrite a variable - in the user namespace. - TODO: Add a feature to prevent user namespace overwriting. - input (list of str): The strings to send in to calls to input. - You can also pass in a generator to construct strings - dynamically. - threaded (bool): Whether or not the function execution should be - executed in a separate thread. Defaults to True. This prevents - timeouts from occuring in the students' code (a TimeOutError - will be thrown after 3 seconds). - context (False, None, or list[str]): The context to give any - exceptions. If None, then the recorded context will be used. If - a string, tracebacks will be shown with the given context. If - False, no context will be given. - keep_context (bool): Whether or not to stay in the current context, - or to start a new one. Defaults to False. - Returns: - If the call was successful, returns the result of executing the - code. Otherwise, it will return an Exception relevant to the - failure (might be a SandboxException, might be a user-space - exception). - """ - # Confirm that the function_name exists - if function not in self.functions: - if function not in self.data: - self.exception = SandboxHasNoVariable( - "The function {function} does not exist.".format(function=function) - ) - else: - self.exception = SandboxHasNoFunction( - "The variable {function} is not a function.".format(function=function) - ) - return self.exception - # Parse kwargs for any special arguments. 
- as_filename = kwargs.pop('as_filename', self.instructor_filename) - target = kwargs.pop('target', '_') - modules = kwargs.pop('modules', {}) - inputs = kwargs.pop('inputs', None) - threaded = kwargs.pop('threaded', self.threaded) - context = kwargs.pop('context', self.context) - keep_context = kwargs.pop('keep_context', self.keep_context) - report_exceptions = kwargs.pop('report_exceptions', self.report_exceptions_mode) - raise_exceptions = kwargs.pop('raise_exceptions', self.raise_exceptions_mode) - # Create the actual arguments and call - if not keep_context or not self.call_id: - self.call_id += 1 - self.output_contexts[self.call_id] = [] - self.call_contexts[self.call_id] = [] - self.input_contexts[self.call_id] = [] - # Always update the target context to be most recent - self.target_contexts[self.call_id] = target - actual, student = self._construct_call(function, args, kwargs, target, - context) - if context is None: - context = student - # if context is None: - # self.call_contexts[self.call_id].append(student_call) - # if context is not False: - # self.call_contexts[self.call_id] = context - self.run(actual, as_filename=as_filename, modules=modules, - inputs=inputs, threaded=threaded, - context=context, keep_context=keep_context, - report_exceptions=report_exceptions, - raise_exceptions=raise_exceptions) - self._purge_temporaries() - if self.exception is None: - self._ = self.data[target] - if self.result_proxy is not None: - self._ = self.result_proxy(self._, call_id=self.call_id, - sandbox=self) - return self._ - else: - # TODO: Might need to wrap this in case the student was supposed - # to return an exception - weird circumstance though - return self.exception - - def make_safe_variable(self, name): - """ - Tries to construct a safe variable name in the current namespace, based - off the given one. This is accomplished by appending a "_" and a number - of increasing value until no comparable name exists in the namespace. - This is particularly useful when you want to create a variable name to - assign to, but you are concerned that the user might have a variable - with that name already, which their code relies on. - - Args: - name (str): A desired target name. - Returns: - str: A safe target name, based off the given one. 
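-
-        Example (illustrative): if the namespace already contains ``total``
-        and ``total_2``, then::
-
-            sandbox.make_safe_variable("total")    # returns "total_3"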
- """ - current_addition = "" - attempt_index = 2 - while name + current_addition in self.data: - current_addition = "_{}".format(attempt_index) - attempt_index += 1 - return name + current_addition - - def _construct_call(self, function, args, kwargs, target, context): - str_args = [self._make_temporary('arg', index, value, context) - for index, value in enumerate(args)] - str_kwargs = ["{}={}".format(key, - self._make_temporary('kwarg', key, value, context)) - for key, value in kwargs.items()] - arguments = ", ".join(str_args + str_kwargs) - call = "{}({})".format(function, arguments) - if target is None: - actual = call - else: - actual = "{} = {}".format(target, call) - student_call = call if target is "_" else actual - return actual, student_call - - def _start_patches(self, *patches): - self._current_patches.append(patches) - for patch in patches: - patch.start() - - def _stop_patches(self): - if not self._current_patches: - return - patches = self._current_patches.pop() - for patch in patches: - patch.stop() - - def _capture_exception(self, exception, exc_info, report_exceptions, - raise_exceptions, context, keep_context, - as_filename="", code=""): - self.exception = exception - if context is not False: - if context is None or keep_context: - contexts = self.call_contexts[self.call_id] - if context is not None: - contexts.append(context) - context = '\n'.join(contexts) # [1:]) - if context.strip(): - context = self.CONTEXT_MESSAGE.format(context=context) - inputs = self.input_contexts[self.call_id] - if inputs is not None and inputs: - inputs = "\n".join(inputs) - context += "\n" + self.INPUT_CONTEXT_MESSAGE.format(inputs=inputs) - else: - context = self.FILE_CONTEXT_MESSAGE.format(filename=self.report['source']['filename']) - self.exception = _add_context_to_error(self.exception, context) - line_offset = self.report['source'].get('line_offset', 0) - student_filename = self.report['source'].get('filename', as_filename) - if 'lines' in self.report['source']: - lines = self.report['source']['lines'] - else: - lines = code.split("\n") - traceback = SandboxTraceback(self.exception, exc_info, - self.full_traceback, - self.instructor_filename, - line_offset, student_filename, - lines) - self.exception_position = {'line': traceback.line_number} - self.exception_formatted = traceback.format_exception() - self.exception_name = str(self.exception.__class__)[8:-2] - # Do we add the exception to the report? - if report_exceptions is False: - return True - if report_exceptions is None and not self.report_exceptions_mode: - return True - self.report.attach(self.exception_name, - group=self.report.group, - category='Runtime', tool='Sandbox', - mistake={'message': self.exception_formatted, - 'error': self.exception}) - if raise_exceptions is True: - raise SandboxStudentCodeException(self.exception) - return False - - def run(self, code, as_filename=None, modules=None, inputs=None, - threaded=None, report_exceptions=True, raise_exceptions=False, - context=False, keep_context=False): - """ - Execute the given string of code in this sandbox. - - Args: - code (str): The string of code to be executed. - as_filename (str): The filename to use when executing the code - - this is cosmetic, technically speaking, it has no relation - to anything on disk. It will be present in tracebacks. - Defaults to Source's filename. - modules (dict[str:Module]): Modules to mock. - inputs (list[str]): The inputs to give from STDIN, as a list of - strings. 
You can also give a function that emulates the - input function; e.g., consuming a prompt (str) and producing - strings. This could be used to make a more interactive input - system. - context (str): The context to give any exceptions. - If None, then the recorded context will be used. If a string, - tracebacks will be shown with the given context. If False, - no context will be given (the default). - threaded (bool): whether or not to run this code in a separate - thread. Defaults to :attribute:`Sandbox.threaded`. - report_exceptions (bool): Whether or not to capture exceptions. - """ - # Handle any threading if necessary - if threaded is None: - threaded = self.threaded - if threaded: - try: - return timeout(self.allowed_time, self.run, code, as_filename, - modules, inputs, False, - report_exceptions, raise_exceptions, - context, keep_context) - except TimeoutError as timeout_exception: - self._stop_patches() - self._capture_exception(timeout_exception, sys.exc_info(), - report_exceptions, raise_exceptions, - context, keep_context, as_filename, - code) - return self - - if as_filename is None: - as_filename = os.path.basename(self.report['source']['filename']) - # todo: earlier version of inputs being made? - if inputs is not None: - self.set_input(inputs) - # Override builtins and mock stuff out - mocked_functions = self.mocked_functions.copy() - mocked_functions['input'] = self._input_tracker - mocked_functions['raw_input'] = self._input_tracker - mocked_functions['sys'] = sys - mocked_functions['os'] = os - mocked._override_builtins(self.data, mocked_functions) - - self.exception = None - self.exception_position = None - self.exception_formatted = None - - # Patch in dangerous built-ins - x = sys.stdout - capture_stdout = io.StringIO() - self._start_patches( - patch.dict('sys.modules', self.mocked_modules), - patch('sys.stdout', capture_stdout), - patch('time.sleep', return_value=None), - ) - # TODO: Hack, add more flexibile way to specify unusable modules - for module in list(sys.modules.keys()): - if module.startswith('pedal.'): - del sys.modules[module] - try: - compiled_code = compile(code, as_filename, 'exec') - with self.trace._as_filename(as_filename, code): - exec(compiled_code, self.data) - except Exception as user_exception: - self._stop_patches() - info = sys.exc_info() - self._capture_exception(user_exception, info, - report_exceptions, raise_exceptions, - context, keep_context, as_filename, - code) - else: - self._stop_patches() - finally: - self.append_output(capture_stdout.getvalue()) - if context is None: - self.call_contexts[self.call_id].append(code) - elif isinstance(context, str): - self.call_contexts[self.call_id].append(context) - elif context is not False: - self.call_contexts[self.call_id] = context - return self - - -def run(initial_data=None, initial_raw_output=None, initial_exception=None, - allowed_functions=None, - modules=None, inputs=None, report_exceptions=True, raise_exceptions=False, - context=None, - full_traceback=False, tracer_style='none', threaded=False, - result_proxy=SandboxResult, - instructor_filename="instructor_tests.py", - code=None, as_filename=None, report=None): - if report is None: - report = MAIN_REPORT - if 'run' not in report['sandbox']: - report['sandbox']['settings'] = [ - initial_data, initial_raw_output, initial_exception, modules, - full_traceback, tracer_style, threaded, report, context, - result_proxy, instructor_filename, allowed_functions - ] - report['sandbox']['run'] = Sandbox(*report['sandbox']['settings']) - - sandbox = 
report['sandbox']['run'] - if code is None: - code = report['source']['code'] - sandbox.run(code, as_filename, modules, inputs, threaded, - report_exceptions, raise_exceptions, context=context, keep_context=False) - return sandbox - - -def reset(report=None): - if report is None: - report = MAIN_REPORT - if 'settings' in report['sandbox']: - report['sandbox']['run'] = Sandbox(*report['sandbox']['settings']) - else: - run(report=report) diff --git a/src/lib/pedal/sandbox/timeout.py b/src/lib/pedal/sandbox/timeout.py deleted file mode 100644 index cb88f6de05..0000000000 --- a/src/lib/pedal/sandbox/timeout.py +++ /dev/null @@ -1,2 +0,0 @@ -def timeout(delay, func, *args, **kwargs): - return func(*args, **kwargs) \ No newline at end of file diff --git a/src/lib/pedal/sandbox/tracer.py b/src/lib/pedal/sandbox/tracer.py deleted file mode 100644 index f63d306306..0000000000 --- a/src/lib/pedal/sandbox/tracer.py +++ /dev/null @@ -1,108 +0,0 @@ -import sys -import os - -try: - import coverage -except ImportError: - coverage = None - -try: - from bdb import Bdb, BdbQuit -except Exception: - class Bdb: - pass - - - class BdbQuit: - pass - - -class SandboxBasicTracer: - def __init__(self): - super().__init__() - self.filename = "student.py" - - def _as_filename(self, filename, code): - if os.path.isabs(filename): - self.filename = filename - else: - self.filename = os.path.abspath(filename) - self.code = code - return self - - def __enter__(self): - pass - - def __exit__(self, exc_type, exc_val, traceback): - pass - - -class SandboxCoverageTracer(SandboxBasicTracer): - def __init__(self): - super().__init__() - if coverage is None: - raise ImportError("The coverage package is not available.") - self.n_missing = None - self.n_statements = None - self.pc_covered = None - self.missing = set() - self.lines = set() - # self.s = sys.stdout - - def __enter__(self): - # Force coverage to accept the code - self.original = coverage.python.get_python_source - - def _get_source_correctly(reading_filename): - print(reading_filename, file=self.s) - if reading_filename == self.filename: - return self.code - else: - return self.original(reading_filename) - - coverage.python.get_python_source = _get_source_correctly - self.coverage = coverage.Coverage() - self.coverage.start() - - def __exit__(self, exc_type, exc_val, traceback): - self.coverage.stop() - self.coverage.save() - # Restore the get_python_source reader - coverage.python.get_python_source = self.original - self.original = None - # Actually analyze the data, attach some data - analysis = self.coverage._analyze(self.filename) - # print(vars(self.coverage._analyze(self.filename)), file=self.s) - self.n_missing = analysis.numbers.n_missing - self.n_statements = analysis.numbers.n_statements - self.pc_covered = analysis.numbers.pc_covered - self.missing = analysis.missing - self.lines = analysis.statements - analysis.missing - - @property - def percent_covered(self): - return self.pc_covered - - -class SandboxCallTracer(SandboxBasicTracer, Bdb): - def __init__(self): - super().__init__() - self.calls = {} - - def user_call(self, frame, argument_list): - code = frame.f_code - name = code.co_name - if name not in self.calls: - self.calls[name] = [] - self.calls[name].append(code) - - def __enter__(self): - self.reset() - self._old_trace = sys.gettrace() - sys.settrace(self.trace_dispatch) - - def __exit__(self, exc_type, exc_val, traceback): - sys.settrace(self._old_trace) - self.quitting = True - # Return true to suppress exception (if it is a BdbQuit) - return 
isinstance(exc_type, BdbQuit) diff --git a/src/lib/pedal/sk_mod_instructor_list.txt b/src/lib/pedal/sk_mod_instructor_list.txt deleted file mode 100644 index 5203e8c5d9..0000000000 --- a/src/lib/pedal/sk_mod_instructor_list.txt +++ /dev/null @@ -1,40 +0,0 @@ -GracefulExit - -StudentData (instance is `student`) - __init__(self) - get_names_by_type(self, type, exclude_builtins) - get_values_by_type(self, type, exclude_builtins) -get_output() -reset_output() -queue_input(*inputs) -get_program() -run_student() - -parse_program() -had_execution_time_error() -limit_execution_time() -unlimit_execution_time() -analyze_program() - -def_use_error(AstNode) - -CorruptedAstNode - __init__(self) -find_match(instructor_code) -find_matches(instructor_code) - -ASTMap - __init__(self, JSAstMap) - get_std_name(self, id) - get_std_exp(self, id) - -AstNode - __init__(self, id) - __eq__(self, other) - numeric_logic_check(self, mag, expr) - __str__(self) - __repr__(self) - __getattr__(self, key) - has(self, AstNode) - find_all(self, type) - \ No newline at end of file diff --git a/src/lib/pedal/source/__init__.py b/src/lib/pedal/source/__init__.py deleted file mode 100644 index 4033ae3e05..0000000000 --- a/src/lib/pedal/source/__init__.py +++ /dev/null @@ -1,110 +0,0 @@ -""" -A package for verifying source code. -""" - -from pedal.source.sections import * -from pedal.report import MAIN_REPORT -import re -import ast - -NAME = 'Source' -SHORT_DESCRIPTION = "Verifies source code and attaches it to the report" -DESCRIPTION = ''' -''' -REQUIRES = [] -OPTIONALS = [] -CATEGORY = 'Syntax' - -__all__ = ['NAME', 'DESCRIPTION', 'SHORT_DESCRIPTION', 'REQUIRES', 'OPTIONALS', - 'set_source', 'check_section_exists', 'next_section', 'verify_section', - 'set_source_file'] -DEFAULT_PATTERN = r'^(##### Part .+)$' - - -def set_source(code, filename='__main__.py', sections=False, independent=False, - report=None): - """ - Sets the contents of the Source to be the given code. Can also be - optionally given a filename. - - Args: - code (str): The contents of the source file. - filename (str): The filename of the students' code. Defaults to - __main__.py. - sections (str or bool): Whether or not the file should be divided into - sections. If a str, then it should be a - Python regular expression for how the sections - are separated. If False, there will be no - sections. If True, then the default pattern - will be used: '^##### Part (\\d+)$' - report (Report): The report object to store data and feedback in. If - left None, defaults to the global MAIN_REPORT. 
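-
-    Example (an illustrative sketch; ``student_code`` is a hypothetical
-    string of submitted code)::
-
-        set_source("print('Hello world!')")
-        set_source(student_code, filename="answer.py", sections=True)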
- """ - if report is None: - report = MAIN_REPORT - report['source']['code'] = code - report['source']['full'] = code - report['source']['lines'] = code.split("\n") - report['source']['filename'] = filename - report['source']['independent'] = independent - report['source']['success'] = True - if not sections: - report['source']['sections'] = None - report['source']['section'] = None - _check_issues(code, report) - else: - if sections: - pattern = DEFAULT_PATTERN - else: - pattern = sections - report.group = 0 - report['source']['section_pattern'] = pattern - report['source']['section'] = 0 - report['source']['line_offset'] = 0 - report['source']['sections'] = re.split(pattern, code, - flags=re.MULTILINE) - report['source']['code'] = report['source']['sections'][0] - - -def _check_issues(code, report): - if code.strip() == '': - report.attach('Blank source', category='Syntax', tool=NAME, - group=report['source']['section'], - mistake="Source code file is blank.") - report['source']['success'] = False - try: - parsed = ast.parse(code, report['source']['filename']) - report['source']['ast'] = parsed - except SyntaxError as e: - report.attach('Syntax error', category='Syntax', tool='Source', - group=report['source']['section'], - mistake={'message': "Invalid syntax on line " - + str(e.lineno), - 'error': e, - 'position': {"line": e.lineno}}) - report['source']['success'] = False - report['source']['ast'] = ast.parse("") - - -def get_program(report=None): - if report is None: - report = MAIN_REPORT - return report['source']['code'] - - -def set_source_file(filename, sections=False, independent=False, report=None): - if report is None: - report = MAIN_REPORT - try: - with open(filename, 'r') as student_file: - set_source(student_file.read(), filename=filename, - sections=sections, independent=independent, - report=report) - except IOError: - message = ("The given filename ('{filename}') was either not found" - " or could not be opened. Please make sure the file is" - " available.").format(filename=filename) - report.attach('Source File Not Found', category='Syntax', tool='Source', - group=0 if sections else None, - mistake={'message': message}) - report['source']['success'] = False diff --git a/src/lib/pedal/source/sections.py b/src/lib/pedal/source/sections.py deleted file mode 100644 index a4b9a4bbf2..0000000000 --- a/src/lib/pedal/source/sections.py +++ /dev/null @@ -1,134 +0,0 @@ -from pedal.report import MAIN_REPORT -import ast - - -# def move_to_section(section_number, name, report=None): -# pass - -def _calculate_section_number(section_index): - return int((section_index + 1) / 2) - - -def next_section(name="", report=None): - if report is None: - report = MAIN_REPORT - report.execute_hooks('source.next_section.before') - source = report['source'] - # if not report['source']['success']: - # return False - source['section'] += 2 - section_index = source['section'] - section_number = _calculate_section_number(section_index) - sections = source['sections'] - found = len(source['sections']) - if section_index < found: - if source['independent']: - source['code'] = ''.join(sections[section_index]) - old_code = ''.join(sections[:section_index]) - source['line_offset'] = len(old_code.split("\n")) - 1 - else: - source['code'] = ''.join(sections[:section_index + 1]) - report.group = section_index - else: - report.attach('Syntax error', category='Syntax', tool='Source', - mistake=("Tried to advance to next section but the " - "section was not found. 
Tried to load section " - "{count}, but there were only {found} sections." - ).format(count=section_number, found=found)) - - -def check_section_exists(section_number, report=None): - """ - Checks that the right number of sections exist. The prologue before the - first section is 0, while subsequent ones are 1, 2, 3, etc. - So if you have 3 sections in your code plus the prologue, - you should pass in 3 and not 4 to verify that all of them exist. - """ - if report is None: - report = MAIN_REPORT - if not report['source']['success']: - return False - found = int((len(report['source']['sections']) - 1) / 2) - if section_number > found: - report.attach('Syntax error', category='Syntax', tool='Source', - group=report['source']['section'], - mistake=("Incorrect number of sections in your file. " - "Expected {count}, but only found {found}" - ).format(count=section_number, found=found)) - - -def verify_section(report=None): - if report is None: - report = MAIN_REPORT - source = report['source'] - # if not source['success']: - # return False - code = source['code'] - try: - parsed = ast.parse(code, source['filename']) - source['ast'] = parsed - except SyntaxError as e: - report.attach('Syntax error', category='Syntax', tool='Source', - group=source['section'], - mistake={'message': "Invalid syntax on line " - + str(e.lineno + source['line_offset']) + "\n", - 'error': e, - 'position': {"line": e.lineno}}) - source['success'] = False - if 'ast' in source: - del source['ast'] - return source['success'] - - -class _finish_section: - def __init__(self, number, *functions): - if isinstance(number, int): - self.number = number - else: - self.number = -1 - functions = [number] + list(functions) - self.functions = functions - for function in functions: - self(function, False) - - def __call__(self, f=None, quiet=True): - if f is not None: - f() - if quiet: - print("\tNEXT SECTION") - - def __enter__(self): - pass - - def __exit__(self, x, y, z): - print("\tNEXT SECTION") - # return wrapped_f - - -def finish_section(number, *functions, **kwargs): - if 'next_section' in kwargs: - next_section = kwargs['next_section'] - else: - next_section = False - if len(functions) == 0: - x = _finish_section(number, *functions) - x() - else: - result = _finish_section(number, *functions) - if next_section: - print("\tNEXT SECTION") - return result - - -def section(number): - """ - """ - pass - - -def precondition(function): - pass - - -def postcondition(function): - pass diff --git a/src/lib/pedal/tifa/.gitignore b/src/lib/pedal/tifa/.gitignore deleted file mode 100644 index 401fcfe58a..0000000000 --- a/src/lib/pedal/tifa/.gitignore +++ /dev/null @@ -1 +0,0 @@ -_temp_tifa.py \ No newline at end of file diff --git a/src/lib/pedal/tifa/__init__.py b/src/lib/pedal/tifa/__init__.py deleted file mode 100644 index f0fe320220..0000000000 --- a/src/lib/pedal/tifa/__init__.py +++ /dev/null @@ -1,104 +0,0 @@ -""" -Python Type Inferencer and Flow Analyzer (TIFA) - -TIFA uses a number of simplifications of the Python language. - * Variables cannot change type - * Variables cannot be deleted - * Complex types have to be homogenous - * No introspection or reflective characteristics - * No dunder methods - * No closures (maybe?) - * You cannot write a variable out of scope - * You cannot read a mutable variable out of scope - * No multiple inheritance - -Additionally, it reads the following as issues: - * Cannot read a variable without having first written to it. - * Cannot rewrite a variable unless it has been read. 
- -Important concepts: - -.. glossary:: - - Issue - A problematic situation in the submitted code that will be reported - but may not stop the execution. However, when an Issue occurs, - any results may be invalid. - - Error - A situation in execution that terminates the program. - - Name - A name of a variable - - Scope - The context of a function, with its own namespaces. Represented - internally using numeric IDs (Scope IDs). - - Scope Chain - A stack of scopes, with the innermost scope on top. - - Fully Qualified Name - A string representation of a variable and its scope - chain, written using "/". For example: 0/1/4/my_variable_name - - Path - A single path of execution through the control flow; every program - has at least one sequential path, but IFs, FORs, WHILEs, etc. can - cause multiple paths. Paths are represented using numeric IDs (Path - IDs). - - State - Information about a Name that indicates things like the variable's - current type and whether that name has been read, set, or - overwritten. - - Identifier - A wrapper around variables, used to hold their potential - non-existence (which is an Issue but not an Error). - - Type - A symbolic representation of the variable's type. - - Literal - Sometimes, we need a specialized representation of a literal value - to be passed around. This is particularly important for accessing - elements in an tuples. - - Name Map - (Path x Fully Qualified Names) => States -""" - -from pedal.tifa.tifa import Tifa -from pedal.report import MAIN_REPORT - -NAME = 'TIFA' -SHORT_DESCRIPTION = "Finds common issues caused by students." -DESCRIPTION = '''Python Type Inferencer and Flow Analyzer (TIFA) - -Tifa traverses an AST to detect common issues made by students. -''' -REQUIRES = ['Source'] -OPTIONALS = [] - - -def tifa_analysis(python_3=True, report=None): - """ - Perform the TIFA analysis and attach the results to the Report. - - Args: - python_3 (bool): Whether to expect a Python3 formated file, or Python - 2. This has slight nuance on certain AST elements. - report (:class:`Report`): The Report object to attach results to. - Defaults to :data:`MAIN_REPORT`. 
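-
-    Example (an illustrative sketch; assumes ``set_source`` has already
-    stored the submission in ``report['source']['code']``)::
-
-        tifa_analysis()
-        # MAIN_REPORT['tifa']['issues'] now maps issue names (e.g.,
-        # 'Unused Variable') to lists of metadata dictionaries.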
- """ - if report is None: - report = MAIN_REPORT - t = Tifa(python_3=python_3, report=report) - t.process_code(report['source']['code']) - return t - - -__all__ = ['NAME', 'DESCRIPTION', 'SHORT_DESCRIPTION', - 'REQUIRES', 'OPTIONALS', - 'tifa_analysis', 'Tifa'] diff --git a/src/lib/pedal/tifa/builtin_definitions.py b/src/lib/pedal/tifa/builtin_definitions.py deleted file mode 100644 index 9ffe0d6742..0000000000 --- a/src/lib/pedal/tifa/builtin_definitions.py +++ /dev/null @@ -1,212 +0,0 @@ -from pedal.tifa.type_definitions import (UnknownType, FunctionType, - NumType, NoneType, BoolType, - TupleType, ListType, StrType, - FileType, DictType, ModuleType, - SetType, DayType, TimeType, ClassType, - LiteralNum) - - -def get_builtin_module(name): - if name == 'matplotlib': - return ModuleType('matplotlib', - submodules={ - 'pyplot': ModuleType('pyplot', fields={ - 'plot': FunctionType(name='plot', returns=NoneType()), - 'hist': FunctionType(name='hist', returns=NoneType()), - 'scatter': FunctionType(name='scatter', returns=NoneType()), - 'show': FunctionType(name='show', returns=NoneType()), - 'xlabel': FunctionType(name='xlabel', returns=NoneType()), - 'ylabel': FunctionType(name='ylabel', returns=NoneType()), - 'title': FunctionType(name='title', returns=NoneType()), - }) - }) - elif name == 'pprint': - return ModuleType('pprint', - fields={ - 'pprint': FunctionType(name='pprint', returns=NoneType()) - }) - elif name == 'random': - return ModuleType('random', - fields={ - 'randint': FunctionType(name='randint', returns=NumType()) - }) - elif name == 'string': - return ModuleType('string', - fields={ - 'letters': StrType(empty=False), - 'digits': StrType(empty=False), - 'ascii_letters': StrType(empty=False), - 'punctuation': StrType(empty=False), - 'printable': StrType(empty=False), - 'whitespace': StrType(empty=False), - 'ascii_uppercase': StrType(empty=False), - 'ascii_lowercase': StrType(empty=False), - 'hexdigits': StrType(empty=False), - 'octdigits': StrType(empty=False), - }) - elif name == 'turtle': - return ModuleType('turtle', - fields={ - 'forward': FunctionType(name='forward', returns=NoneType()), - 'backward': FunctionType(name='backward', returns=NoneType()), - 'color': FunctionType(name='color', returns=NoneType()), - 'right': FunctionType(name='right', returns=NoneType()), - 'left': FunctionType(name='left', returns=NoneType()), - }) - elif name == 'parking': - return ModuleType('parking', - fields={ - 'Time': FunctionType(name='Time', returns=TimeType()), - 'now': FunctionType(name='now', returns=TimeType()), - 'Day': FunctionType(name='Day', returns=DayType()), - 'today': FunctionType(name='today', returns=DayType()), - }), - elif name == 'math': - return ModuleType('math', - fields={ - 'ceil': FunctionType(name='ceil', returns=NumType()), - 'copysign': FunctionType(name='copysign', returns=NumType()), - 'fabs': FunctionType(name='fabs', returns=NumType()), - 'factorial': FunctionType(name='factorial', returns=NumType()), - 'floor': FunctionType(name='floor', returns=NumType()), - 'fmod': FunctionType(name='fmod', returns=NumType()), - 'frexp': FunctionType(name='frexp', returns=NumType()), - 'fsum': FunctionType(name='fsum', returns=NumType()), - 'gcd': FunctionType(name='gcd', returns=NumType()), - 'isclose': FunctionType(name='isclose', returns=BoolType()), - 'isfinite': FunctionType(name='isfinite', returns=BoolType()), - 'isinf': FunctionType(name='isinf', returns=BoolType()), - 'isnan': FunctionType(name='isnan', returns=BoolType()), - 'ldexp': FunctionType(name='ldexp', 
returns=NumType()), - 'modf': FunctionType(name='modf', returns=NumType()), - 'trunc': FunctionType(name='trunc', returns=NumType()), - 'log': FunctionType(name='log', returns=NumType()), - 'log1p': FunctionType(name='log1p', returns=NumType()), - 'log2': FunctionType(name='log2', returns=NumType()), - 'log10': FunctionType(name='log10', returns=NumType()), - 'pow': FunctionType(name='pow', returns=NumType()), - 'sqrt': FunctionType(name='sqrt', returns=NumType()), - 'sin': FunctionType(name='sin', returns=NumType()), - 'cos': FunctionType(name='cos', returns=NumType()), - 'tan': FunctionType(name='tan', returns=NumType()), - 'asin': FunctionType(name='asin', returns=NumType()), - 'acos': FunctionType(name='acos', returns=NumType()), - 'atan': FunctionType(name='atan', returns=NumType()), - 'atan2': FunctionType(name='atan2', returns=NumType()), - 'hypot': FunctionType(name='hypot', returns=NumType()), - 'degrees': FunctionType(name='degrees', returns=NumType()), - 'radians': FunctionType(name='radians', returns=NumType()), - 'sinh': FunctionType(name='sinh', returns=NumType()), - 'cosh': FunctionType(name='cosh', returns=NumType()), - 'tanh': FunctionType(name='tanh', returns=NumType()), - 'asinh': FunctionType(name='asinh', returns=NumType()), - 'acosh': FunctionType(name='acosh', returns=NumType()), - 'atanh': FunctionType(name='atanh', returns=NumType()), - 'erf': FunctionType(name='erf', returns=NumType()), - 'erfc': FunctionType(name='erfc', returns=NumType()), - 'gamma': FunctionType(name='gamma', returns=NumType()), - 'lgamma': FunctionType(name='lgamma', returns=NumType()), - 'pi': NumType(), - 'e': NumType(), - 'tau': NumType(), - 'inf': NumType(), - 'nan': NumType(), - }) - - -def _builtin_sequence_constructor(sequence_type): - """ - Helper function for creating constructors for the Set and List types. - These constructors use the subtype of the arguments. - - Args: - sequence_type (Type): A function for creating new sequence types. - """ - - def sequence_call(tifa, function_type, callee, args, position): - # TODO: Should inherit the emptiness too - return_type = sequence_type(empty=True) - if args: - return_type.subtype = args[0].index(LiteralNum(0)) - return_type.empty = False - return return_type - - return sequence_call - - -def _builtin_zip(tifa, function_type, callee, args, position): - """ - Definition of the built-in zip function, which consumes a series of - sequences and returns a list of tuples, with each tuple composed of the - elements of the sequence paired (or rather, tupled) together. 
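-
-    Example (illustrative): zipping a list of numbers with a string is
-    modeled as a non-empty ListType whose subtype is a TupleType pairing
-    the two element types.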
- """ - if args: - tupled_types = TupleType(subtypes=[]) - for arg in args: - tupled_types.append(arg.index(0)) - return ListType(tupled_types, empty=False) - return ListType(empty=True) - - -# TODO: Exceptions - -def get_builtin_function(name): - # Void Functions - if name == "print": - return FunctionType(name="print", returns=NoneType()) - # Math Functions - elif name in ("int", "abs", "float", "len", "ord", "pow", "round", "sum"): - return FunctionType(name=name, returns=NumType()) - # Boolean Functions - elif name in ("bool", "all", "any", "isinstance"): - return FunctionType(name=name, returns=BoolType()) - # String Functions - elif name in ("str", 'chr', 'bin', 'repr', 'input'): - return FunctionType(name=name, returns=StrType()) - # File Functions - elif name == "open": - return FunctionType(name="open", returns=FileType()) - # List Functions - elif name == "map": - return FunctionType(name="map", returns=ListType(empty=False)) - elif name == "list": - return FunctionType(name="list", - definition=_builtin_sequence_constructor(ListType)) - # Set Functions - elif name == "set": - return FunctionType(name="set", - definition=_builtin_sequence_constructor(SetType)) - # Dict Functions - elif name == "dict": - return FunctionType(name="dict", returns=DictType()) - # Pass through - elif name == "sorted": - return FunctionType(name="sorted", returns='identity') - elif name == "reversed": - return FunctionType(name="reversed", returns='identity') - elif name == "filter": - return FunctionType(name="filter", returns='identity') - # Special Functions - elif name == "type": - return FunctionType(name="type", returns=UnknownType()) - elif name == "range": - return FunctionType(name="range", returns=ListType(NumType(), empty=False)) - elif name == "dir": - return FunctionType(name="dir", returns=ListType(StrType(), empty=False)) - elif name == "max": - return FunctionType(name="max", returns='element') - elif name == "min": - return FunctionType(name="min", returns='element') - elif name == "zip": - return FunctionType(name="zip", returns=_builtin_zip) - elif name == "__import__": - return FunctionType(name="__import__", returns=ModuleType()) - elif name == "globals": - return FunctionType(name="globals", - returns=DictType(keys=StrType(), - values=UnknownType(), - empty=False)) - elif name in ("classmethod", "staticmethod"): - return FunctionType(name=name, returns='identity') - elif name in ("__name__",): - return StrType() diff --git a/src/lib/pedal/tifa/identifier.py b/src/lib/pedal/tifa/identifier.py deleted file mode 100644 index f250069edb..0000000000 --- a/src/lib/pedal/tifa/identifier.py +++ /dev/null @@ -1,24 +0,0 @@ -class Identifier: - """ - A representation of an Identifier, encapsulating its current level of - existence, scope and State. - - Attributes: - exists (bool): Whether or not the variable actually is defined anywhere. - It is possible that a variable was retrieved that does - not actually exist yet, which indicates it might need to - be created. - in_scope (bool): Whether or not the variable exists in the current - scope. Used to detect the presence of certain kinds - of errors where the user is using a variable from - a different scope. - scoped_name (str): The fully qualified name of the variable, including - its scope chain. - state (State): The current state of the variable. 
- """ - - def __init__(self, exists, in_scope=False, scoped_name="UNKNOWN", state=""): - self.exists = exists - self.in_scope = in_scope - self.scoped_name = scoped_name - self.state = state diff --git a/src/lib/pedal/tifa/messages.py b/src/lib/pedal/tifa/messages.py deleted file mode 100644 index 8bc2cde2c7..0000000000 --- a/src/lib/pedal/tifa/messages.py +++ /dev/null @@ -1,167 +0,0 @@ -import ast - -OPERATION_DESCRIPTION = { - ast.Pow: "an exponent", - ast.Add: "an addition", - ast.Mult: "a multiplication", - ast.Sub: "a subtraction", - ast.Div: "a division", - ast.FloorDiv: "a division", - ast.Mod: "a modulo", - ast.LShift: "a left shift", - ast.RShift: "a right shift", - ast.BitOr: "a bit or", - ast.BitAnd: "a bit and", - ast.BitXor: "a bit xor", - ast.And: "an and", - ast.Or: "an or", - ast.Eq: "an ==", - ast.NotEq: "a !=", - ast.Lt: "a <", - ast.LtE: "a <=", - ast.Gt: "a >", - ast.GtE: "a >=", - ast.Is: "an is", - ast.IsNot: "an is not", - ast.In: "an in", - ast.NotIn: "an not in", -} - - -def _format_message(issue, data): - if issue == 'Action after return': - # A path had a statement after a return. - return ("You performed an action after already returning from a " - "function, on line {line}. You can only return on a path " - "once.").format(line=data['position']['line']) - elif issue == 'Return outside function': - # Attempted to return outside of a function - return ("You attempted to return outside of a function on line {line}." - " But you can only return from within a function." - ).format(line=data['position']['line']) - elif issue == "Multiple Return Types": - return ("Your function returned {actual} on line {line}, even though you defined it to" - " return {expected}. Your function should return values consistently." - ).format(expected=data['expected'], actual=data['actual'], line=data['position']['line']) - elif issue == 'Write out of scope': - # DEPRECATED - # Attempted to modify a variable in a higher scope - return False - return ("You attempted to write a variable from a higher scope " - "(outside the function) on line {line}. You should only " - "use variables inside the function they were declared in." - ).format(line=data['position']['line']) - elif issue == 'Unconnected blocks': - # Any names with ____ - return ("It looks like you have unconnected blocks on line {line}. " - "Before you run your program, you must make sure that all " - "of your blocks are connected that there are no unfilled " - "holes.").format(line=data['position']['line']) - elif issue == 'Iteration Problem': - # Iteration list is the iteration variable - return ("The variable {name} was iterated on line " - "{line} but you used the same variable as the iteration " - "variable. You should choose a different variable name " - "for the iteration variable. Usually, the iteration variable " - "is the singular form of the iteration list (e.g., " - "for a_dog in dogs:).").format( - line=data['position']['line'], - name=data['name']) - elif issue == 'Initialization Problem': - # A variable was read before it was defined - return ("The variable {name} was used on line {line}, " - "but it was not given a value on a previous line. " - "You cannot use a variable until it has been given a value." 
- ).format(line=data['position']['line'], name=data['name']) - elif issue == 'Possible Initialization Problem': - # A variable was read but was not defined in every branch - if data['name'] == '*return': - return False - return ("The variable {name} was used on line {line}, " - "but it was possibly not given a value on a previous " - "line. You cannot use a variable until it has been given " - "a value. Check to make sure that this variable was " - "declared in all of the branches of your decision." - ).format(line=data['position']['line'], name=data['name']) - elif issue == 'Unused Variable': - # A variable was not read after it was defined - name = data['name'] - if data['type'].is_equal('function'): - kind = 'function' - body = 'definition' - else: - kind = 'variable' - body = 'value' - return ("The {kind} {name} was given a {body}, but " - "was never used after that." - ).format(name=name, kind=kind, body=body) - elif issue == 'Overwritten Variable': - return ("The variable {name} was given a value, but " - "{name} was changed on line {line} before it " - "was used. One of the times that you gave {name} " - "a value was incorrect." - ).format(line=data['position']['line'], name=data['name']) - elif issue == 'Iterating over Non-list': - if 'name' not in data or data['name'] is None: - expression = "expression" - else: - expression = "variable {}".format(data['name']) - return ("The {expression} is not a list, but you used " - "it in the iteration on line {line}. You should only iterate " - "over sequences like lists." - ).format(line=data['position']['line'], expression=expression) - elif issue == 'Iterating over empty list': - if 'name' not in data or data['name'] is None: - expression = "expression" - else: - expression = "variable {}".format(data['name']) - return ("The {expression} was set as an empty list, " - "and then you attempted to use it in an iteration on line " - "{line}. You should only iterate over non-empty lists." - ).format(line=data['position']['line'], expression=expression) - elif issue == 'Incompatible types': - op = OPERATION_DESCRIPTION.get(data['operation'].__class__, - str(data['operation'])) - left = data['left'].singular_name - right = data['right'].singular_name - line = data['position']['line'] - return ("You used {op} operation with {left} and {right} on line " - "{line}. But you can't do that with that operator. Make " - "sure both sides of the operator are the right type." - ).format(op=op, left=left, right=right, line=line) - elif issue == "Parameter Type Mismatch": - name = data['parameter_name'] - parameter = data['parameter'].singular_name - argument = data['argument'].singular_name - line = data['position']['line'] - return ("You defined the parameter {name} on line {line} " - "as {parameter}. However, the argument passed to that parameter " - "was {argument}. The formal parameter type must match the argument's type." - ).format(name=name, argument=argument, parameter=parameter, line=line) - elif issue == 'Read out of scope': - return ("You attempted to read a variable from a different scope on " - "line {line}. You should only use variables inside the " - "function they were declared in." 
- ).format(line=data['position']['line']) - return False - - -''' -TODO: Finish these checks -"Empty Body": [], # Any use of pass on its own -"Malformed Conditional": [], # An if/else with empty else or if -"Unnecessary Pass": [], # Any use of pass -"Append to non-list": [], # Attempted to use the append method on a non-list -"Used iteration list": [], # -"Unused iteration variable": [], # -"Type changes": [], # -"Unknown functions": [], # -"Not a function": [], # Attempt to call non-function as function -"Recursive Call": [], -"Incorrect Arity": [], -"Aliased built-in": [], # -"Method not in Type": [], # A method was used that didn't exist for that type -"Submodule not found": [], -"Module not found": [], -"Else on loop body": [], # Used an Else on a For or While -''' diff --git a/src/lib/pedal/tifa/readme.md b/src/lib/pedal/tifa/readme.md deleted file mode 100644 index fd100ad20c..0000000000 --- a/src/lib/pedal/tifa/readme.md +++ /dev/null @@ -1,5 +0,0 @@ -PyTIFA is the Python Type Inferencer and Flow Analyzer, also called just Tifa. - -Tifa is meant to be used on simple programs written in learning situations, in order to provide type information and detect certain common issues. It makes some very strong assumptions and doesn't support all language features. - -Tifa is supported by Skulpt. This means that Tifa is a Python library that can be passed in a string of Python source code in order to traverse its AST using underlying JavaScript libraries. If that isn't confusing, then we invite you to make pull requests. \ No newline at end of file diff --git a/src/lib/pedal/tifa/state.py b/src/lib/pedal/tifa/state.py deleted file mode 100644 index fe75ccd428..0000000000 --- a/src/lib/pedal/tifa/state.py +++ /dev/null @@ -1,77 +0,0 @@ -def check_trace(state): - past_types = [state.type] - for past_state in state.trace: - past_types.extend(check_trace(past_state)) - return past_types - - -class State: - """ - A representation of a variable at a particular point in time of the program. - - Attributes: - name (str): The name of the variable, without its scope chain - trace (list of State): A recursive definition of previous States for - this State. - type (Type): The current type of this variable. - method (str): One of 'store', 'read', (TODO). Indicates the change that - occurred to this variable at this State. - position (dict): A Position dictionary indicating where this State - change occurred in the source code. - read (str): One of 'yes', 'no', or 'maybe'. Indicates if this variable - has been read since it was last changed. If merged from a - diverging path, it is possible that it was "maybe" read. - set (str): One of 'yes', 'no', or 'maybe'. Indicates if this variable - has been set since it was last read. If merged from a - diverging path, it is possible that it was "maybe" changed. - over (str): One of 'yes', 'no', or 'maybe'. Indicates if this variable - has been overwritten since it was last set. If merged from a - diverging path, it is possible that it was "maybe" changed. - over_position (dict): A Position indicating where the State was - previously set versus when it was overwritten. 
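-
-    Example (illustrative): a number that was just stored and has not been
-    read yet might render via ``__str__`` as ``store(r:n,s:y,o:n,NumType)``.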
- """ - - def __init__(self, name, trace, type, method, position, - read='maybe', set='maybe', over='maybe', over_position=None): - self.name = name - self.trace = trace - self.type = type - self.method = method - self.position = position - self.over_position = over_position - self.read = read - self.set = set - self.over = over - - def copy(self, method, position): - """ - Make a copy of this State, copying this state into the new State's trace - """ - return State(self.name, [self], self.type, method, position, - self.read, self.set, self.over, self.over_position) - - def __str__(self): - """ - Create a string representation of this State. - """ - return "{method}(r:{read},s:{set},o:{over},{type})".format( - method=self.method, - read=self.read[0], - set=self.set[0], - over=self.over[0], - type=self.type.__class__.__name__ - ) - - def __repr__(self): - """ - Create a string representation of this State. - """ - return str(self) - - def was_type(self, a_type): - """ - Retrieve all the types that this variable took on over its entire - trace. - """ - past_types = check_trace(self) - return any(past_type.is_equal(a_type) for past_type in past_types) diff --git a/src/lib/pedal/tifa/tifa.py b/src/lib/pedal/tifa/tifa.py deleted file mode 100644 index 8fadd48c0c..0000000000 --- a/src/lib/pedal/tifa/tifa.py +++ /dev/null @@ -1,1293 +0,0 @@ -import ast -from pprint import pprint - -from pedal.report import MAIN_REPORT - -from pedal.tifa.type_definitions import (UnknownType, RecursedType, - FunctionType, ClassType, InstanceType, - NumType, NoneType, BoolType, TupleType, - ListType, StrType, GeneratorType, - DictType, ModuleType, SetType, - # FileType, DayType, TimeType, - type_from_json, type_to_literal, get_tifa_type, - LiteralNum, LiteralBool, - LiteralNone, LiteralStr, - LiteralTuple) -from pedal.tifa.builtin_definitions import (get_builtin_module, get_builtin_function) -from pedal.tifa.type_operations import (merge_types, are_types_equal, - VALID_UNARYOP_TYPES, VALID_BINOP_TYPES, - ORDERABLE_TYPES, INDEXABLE_TYPES) -from pedal.tifa.identifier import Identifier -from pedal.tifa.state import State -from pedal.tifa.messages import _format_message - -__all__ = ['Tifa'] - - -class Tifa(ast.NodeVisitor): - """ - TIFA Class for traversing an AST and finding common issues. - - Args: - python_3 (bool): Whether to parse the code in regular PYTHON_3 mode or - the modified AST that Skulpt uses. - report (Report): The report object to store data and feedback in. If - left None, defaults to the global MAIN_REPORT. - """ - - def __init__(self, python_3=True, report=None): - if report is None: - report = MAIN_REPORT - self.report = report - self._initialize_report() - - def _initialize_report(self): - """ - Initialize a successful report with possible set of issues. - """ - self.report['tifa'] = { - 'success': True, - 'variables': {}, - 'top_level_variables': {}, - 'issues': {} - } - - def report_issue(self, issue, data=None): - """ - Report the given issue with associated metadata, including the position - if not explicitly included. 
- """ - if data is None: - data = {} - if 'position' not in data: - data['position'] = self.locate() - data['message'] = _format_message(issue, data) - if issue not in self.report['tifa']['issues']: - self.report['tifa']['issues'][issue] = [] - self.report['tifa']['issues'][issue].append(data) - if data['message']: - self.report.attach(issue, category='Analyzer', tool='TIFA', - mistake=data) - - def locate(self, node=None): - """ - Return a dictionary representing the current location within the - AST. - - Returns: - Position dict: A dictionary with the fields 'column' and 'line', - indicating the current position in the source code. - """ - if node is None: - if self.node_chain: - node = self.node_chain[-1] - else: - node = self.final_node - return {'column': node.col_offset, 'line': node.lineno} - - def process_code(self, code, filename="__main__"): - """ - Processes the AST of the given source code to generate a report. - - Args: - code (str): The Python source code - filename (str): The filename of the source code (defaults to __main__) - Returns: - Report: The successful or successful report object - """ - # Code - self.source = code.split("\n") if code else [] - filename = filename - - # Attempt parsing - might fail! - try: - ast_tree = ast.parse(code, filename) - except Exception as error: - self.report['tifa']['success'] = False - self.report['tifa']['error'] = error - self.report.attach('tifa_error', category='Analyzer', tool='TIFA', - mistake={ - 'message': "Could not parse code", - 'error': error - }) - return self.report['tifa'] - try: - return self.process_ast(ast_tree) - except Exception as error: - self.report['tifa']['success'] = False - self.report['tifa']['error'] = error - self.report.attach('tifa_error', category='Analyzer', tool='TIFA', - mistake={ - 'message': "Could not process code: " + str(error), - 'error': error - }) - return self.report['tifa'] - - def process_ast(self, ast_tree): - """ - Given an AST, actually performs the type and flow analyses to return a - report. - - Args: - ast_tree (Ast): The AST object - Returns: - Report: The final report object created (also available as a field). - """ - self._reset() - # Traverse every node - self.visit(ast_tree) - - # Check afterwards - self.report['tifa']['variables'] = self.name_map - self._finish_scope() - - # Collect top level variables - self._collect_top_level_variables() - # print(self.report['variables']) - - return self.report['tifa'] - - def _collect_top_level_variables(self): - """ - Walk through the variables and add any at the top level to the - top_level_variables field of the report. 
- """ - top_level_variables = self.report['tifa']['top_level_variables'] - main_path_vars = self.name_map[self.path_chain[0]] - for full_name in main_path_vars: - split_name = full_name.split("/") - if len(split_name) == 2 and split_name[0] == str(self.scope_chain[0]): - name = split_name[1] - top_level_variables[name] = main_path_vars[full_name] - - def _reset(self): - """ - Reinitialize fields for maintaining the system - """ - # Unique Global IDs - self.path_id = 0 - self.scope_id = 0 - self.ast_id = 0 - - # Human readable names - self.path_names = ['*Module'] - self.scope_names = ['*Module'] - self.node_chain = [] - - # Complete record of all Names - self.scope_chain = [self.scope_id] - self.path_chain = [self.path_id] - self.name_map = {} - self.name_map[self.path_id] = {} - self.definition_chain = [] - self.path_parents = {} - self.final_node = None - self.class_scopes = {} - - def find_variable_scope(self, name): - """ - Walk through this scope and all enclosing scopes, finding the relevant - identifier given by `name`. - - Args: - name (str): The name of the variable - Returns: - Identifier: An Identifier for the variable, which could potentially - not exist. - """ - for scope_level, scope in enumerate(self.scope_chain): - for path_id in self.path_chain: - path = self.name_map[path_id] - full_name = "/".join(map(str, self.scope_chain[scope_level:])) + "/" + name - if full_name in path: - is_root_scope = (scope_level == 0) - return Identifier(True, is_root_scope, - full_name, path[full_name]) - - return Identifier(False) - - def find_variable_out_of_scope(self, name): - """ - Walk through every scope and determine if this variable can be found - elsewhere (which would be an issue). - - Args: - name (str): The name of the variable - Returns: - Identifier: An Identifier for the variable, which could potentially - not exist. - """ - for path in self.name_map.values(): - for full_name in path: - unscoped_name = full_name.split("/")[-1] - if name == unscoped_name: - return Identifier(True, False, unscoped_name, path[full_name]) - return Identifier(False) - - def find_path_parent(self, path_id, name): - if name in self.name_map[path_id]: - return Identifier(True, state=self.name_map[path_id][name]) - else: - path_parent = self.path_parents.get(path_id) - if path_parent is None: - return Identifier(False) - else: - return self.find_path_parent(path_parent, name) - - def _finish_scope(self): - """ - Walk through all the variables present in this scope and ensure that - they have been read and not overwritten. - """ - path_id = self.path_chain[0] - for name in self.name_map[path_id]: - if Tifa.in_scope(name, self.scope_chain): - state = self.name_map[path_id][name] - if state.over == 'yes': - position = state.over_position - self.report_issue('Overwritten Variable', - {'name': state.name, 'position': position}) - if state.read == 'no': - self.report_issue('Unused Variable', - {'name': state.name, 'type': state.type, - 'position': state.position}) - - def visit(self, node): - """ - Process this node by calling its appropriate visit_* - - Args: - node (AST): The node to visit - Returns: - Type: The type calculated during the visit. - """ - # Start processing the node - self.node_chain.append(node) - self.ast_id += 1 - - # Actions after return? - if len(self.scope_chain) > 1: - return_state = self.find_variable_scope("*return") - if return_state.exists and return_state.in_scope: - if return_state.state.set == "yes": - self.report_issue("Action after return") - - # No? 
All good, let's enter the node - self.final_node = node - result = ast.NodeVisitor.visit(self, node) - - # Pop the node out of the chain - self.ast_id -= 1 - self.node_chain.pop() - - # If a node failed to return something, return the UNKNOWN TYPE - if result is None: - return UnknownType() - else: - return result - - def _visit_nodes(self, nodes): - """ - Visit all the nodes in the given list. - - Args: - nodes (list): A list of values, of which any AST nodes will be - visited. - """ - for node in nodes: - if isinstance(node, ast.AST): - self.visit(node) - - def walk_targets(self, targets, type, walker): - """ - Iterate through the targets and call the given function on each one. - - Args: - targets (list of Ast nodes): A list of potential targets to be - traversed. - type (Type): The given type to be unraveled and applied to the - targets. - walker (Ast Node, Type -> None): A function that will process - each target and unravel the type. - """ - for target in targets: - walker(target, type) - - def _walk_target(self, target, type): - """ - Recursively apply the type to the target - - Args: - target (Ast): The current AST node to process - type (Type): The type to apply to this node - """ - if isinstance(target, ast.Name): - self.store_iter_variable(target.id, type, self.locate(target)) - return target.id - elif isinstance(target, (ast.Tuple, ast.List)): - result = None - for i, elt in enumerate(target.elts): - elt_type = type.index(LiteralNum(i)) - potential_name = self._walk_target(elt, elt_type) - if potential_name is not None and result is None: - result = potential_name - return result - - def visit_AnnAssign(self, node): - """ - TODO: Implement! - """ - pass - - def visit_Assign(self, node): - """ - Simple assignment statement: - __targets__ = __value__ - - Args: - node (AST): An Assign node - Returns: - None - """ - # Handle value - value_type = self.visit(node.value) - # Handle targets - self._visit_nodes(node.targets) - - self.walk_targets(node.targets, value_type, self.assign_target) - - # TODO: Properly handle assignments with subscripts - def assign_target(self, target, type): - if isinstance(target, ast.Name): - self.store_variable(target.id, type) - elif isinstance(target, (ast.Tuple, ast.List)): - for i, elt in enumerate(target.elts): - eltType = type.index(LiteralNum(i)) - self.assign_target(elt, eltType) - elif isinstance(target, ast.Subscript): - left_hand_type = self.visit(target.value) - if isinstance(left_hand_type, ListType): - # TODO: Handle updating value in list - pass - elif isinstance(left_hand_type, DictType): - if not isinstance(target.slice, ast.Index): - # TODO: Can't subscript a dictionary assignment - return None - literal = self.get_literal(target.slice.value) - if not literal: - key_type = self.visit(target.slice.value) - left_hand_type.empty = False - left_hand_type.keys = key_type.clone() - left_hand_type.values = type.clone() - elif left_hand_type.literals: - original_type = left_hand_type.has_literal(literal) - if not original_type: - left_hand_type.update_key(literal, type.clone()) - elif not are_types_equal(original_type, type): - # TODO: Fix "Dictionary" to be the name of the variable - self.report_issue("Type changes", - {'name': "Dictionary", 'old': original_type, - 'new': type}) - elif isinstance(target, ast.Attribute): - left_hand_type = self.visit(target.value) - if isinstance(left_hand_type, InstanceType): - left_hand_type.add_attr(target.attr, type) - # TODO: Otherwise we attempted to assign to a non-instance - # TODO: Handle minor type 
changes (e.g., appending to an inner list) - - def visit_AugAssign(self, node): - # Handle value - right = self.visit(node.value) - # Handle target - left = self.visit(node.target) - # Target is always a Name, Subscript, or Attribute - name = self.identify_caller(node.target) - - # Handle operation - self.load_variable(name) - if isinstance(left, UnknownType) or isinstance(right, UnknownType): - return UnknownType() - elif type(node.op) in VALID_BINOP_TYPES: - op_lookup = VALID_BINOP_TYPES[type(node.op)] - if type(left) in op_lookup: - op_lookup = op_lookup[type(left)] - if type(right) in op_lookup: - op_lookup = op_lookup[type(right)] - result_type = op_lookup(left, right) - self.assign_target(node.target, result_type) - return result_type - - self.report_issue("Incompatible types", - {"left": left, "right": right, - "operation": node.op}) - - def visit_Attribute(self, node): - # Handle value - value_type = self.visit(node.value) - # Handle ctx - # TODO: Handling contexts - # Handle attr - return value_type.load_attr(node.attr, self, node.value, self.locate()) - - def visit_BinOp(self, node): - # Handle left and right - left = self.visit(node.left) - right = self.visit(node.right) - - # Handle operation - if isinstance(left, UnknownType) or isinstance(right, UnknownType): - return UnknownType() - elif type(node.op) in VALID_BINOP_TYPES: - op_lookup = VALID_BINOP_TYPES[type(node.op)] - if type(left) in op_lookup: - op_lookup = op_lookup[type(left)] - if type(right) in op_lookup: - op_lookup = op_lookup[type(right)] - return op_lookup(left, right) - - self.report_issue("Incompatible types", - {"left": left, "right": right, - "operation": node.op}) - return UnknownType() - - def visit_Bool(self, node): - return BoolType() - - def visit_BoolOp(self, node): - # Handle left and right - values = [] - for value in node.values: - values.append(self.visit(value)) - - # TODO: Truthiness is not supported! 
Probably need a Union type - # TODO: Literals used as truthy value - - # Handle operation - return BoolType() - - def visit_Call(self, node): - # Handle func part (Name or Attribute) - function_type = self.visit(node.func) - # TODO: Need to grab the actual type in some situations - callee = self.identify_caller(node) - - # Handle args - arguments = [self.visit(arg) for arg in node.args] if node.args else [] - - # TODO: Handle keywords - # TODO: Handle starargs - # TODO: Handle kwargs - if isinstance(function_type, FunctionType): - # Test if we have called this definition before - if function_type.definition not in self.definition_chain: - self.definition_chain.append(function_type.definition) - # Function invocation - result = function_type.definition(self, function_type, callee, - arguments, self.locate()) - self.definition_chain.pop() - return result - else: - self.report_issue("Recursive Call", {"name": callee}) - elif isinstance(function_type, ClassType): - constructor = function_type.get_constructor().definition - self.definition_chain.append(constructor) - result = constructor(self, constructor, callee, arguments, self.locate()) - self.definition_chain.pop() - if '__init__' in function_type.fields: - initializer = function_type.fields['__init__'] - if isinstance(initializer, FunctionType): - self.definition_chain.append(initializer) - initializer.definition(self, initializer, result, [result] + arguments, self.locate()) - self.definition_chain.pop() - return result - else: - self.report_issue("Not a function", {"name": callee}) - return UnknownType() - - def visit_ClassDef(self, node): - class_name = node.name - new_class_type = ClassType(class_name) - self.store_variable(class_name, new_class_type) - # TODO: Define a new scope definition that executes the body - # TODO: find __init__, execute that - definitions_scope = self.scope_chain[:] - class_scope = Tifa.NewScope(self, definitions_scope, class_type=new_class_type) - with class_scope: - self.generic_visit(node) - - def visit_Compare(self, node): - # Handle left and right - left = self.visit(node.left) - comparators = [self.visit(compare) for compare in node.comparators] - - # Handle ops - for op, right in zip(node.ops, comparators): - if isinstance(op, (ast.Eq, ast.NotEq, ast.Is, ast.IsNot)): - continue - elif isinstance(op, (ast.Lt, ast.LtE, ast.GtE, ast.Gt)): - if are_types_equal(left, right): - if isinstance(left, ORDERABLE_TYPES): - continue - elif isinstance(op, (ast.In, ast.NotIn)): - if isinstance(right, INDEXABLE_TYPES): - continue - self.report_issue("Incompatible types", - {"left": left, "right": right, - "operation": op}) - return BoolType() - - def _visit_collection_loop(self, node): - # Handle the iteration list - iter = node.iter - iter_list_name = None - if isinstance(iter, ast.Name): - iter_list_name = iter.id - if iter_list_name == "___": - self.report_issue("Unconnected blocks", - {"position": self.locate(iter)}) - state = self.iterate_variable(iter_list_name, self.locate(iter)) - iter_type = state.type - else: - iter_type = self.visit(iter) - - if iter_type.is_empty(): - # TODO: It should check if its ONLY ever iterating over an empty list. 
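                # Illustration of the pattern this branch is meant to flag
                # (hypothetical student code):
                #     items = []
                #     for item in items:
                #         print(item)
                # Here `iter_type` is an empty ListType, so is_empty() returns
                # True and "Iterating over empty list" is reported below.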
- # For now, only reports if we are NOT in a function - if len(self.scope_chain) == 1: - self.report_issue("Iterating over empty list", - {"name": iter_list_name, - "position": self.locate(iter)}) - - if not isinstance(iter_type, INDEXABLE_TYPES): - self.report_issue("Iterating over Non-list", - {"name": iter_list_name, - "position": self.locate(iter)}) - - iter_subtype = iter_type.index(LiteralNum(0)) - - # Handle the iteration variable - iter_variable_name = self._walk_target(node.target, iter_subtype) - - if iter_variable_name and iter_list_name: - if iter_variable_name == iter_list_name: - self.report_issue("Iteration Problem", - {"name": iter_variable_name, - "position": self.locate(node.target)}) - - def visit_comprehension(self, node): - self._visit_collection_loop(node) - # Handle ifs, unless they're blank (None in Skulpt :) - if node.ifs: - self.visit_statements(node.ifs) - - def visit_Dict(self, node): - """ - Three types of dictionaries - - empty - - uniform type - - record - TODO: Handle records appropriately - """ - type = DictType() - if not node.keys: - type.empty = True - else: - type.empty = False - all_literals = True - keys, values, literals = [], [], [] - for key, value in zip(node.keys, node.values): - literal = self.get_literal(key) - key, value = self.visit(key), self.visit(value) - values.append(value) - keys.append(key) - if literal is not None: - literals.append(literal) - else: - all_literals = False - - if all_literals: - type.literals = literals - type.values = values - else: - type.keys = key - type.values = value - return type - - def visit_DictComp(self, node): - # TODO: Handle comprehension scope - for generator in node.generators: - self.visit(generator) - keys = self.visit(node.key) - values = self.visit(node.value) - return DictType(keys=keys, values=values) - - def visit_For(self, node): - self._visit_collection_loop(node) - # Handle the bodies - self.visit_statements(node.body) - self.visit_statements(node.orelse) - - def visit_FunctionDef(self, node): - # Name - function_name = node.name - position = self.locate() - definitions_scope = self.scope_chain[:] - - def definition(tifa, call_type, call_name, parameters, call_position): - function_scope = Tifa.NewScope(self, definitions_scope) - with function_scope: - # Process arguments - args = node.args.args - if len(args) != len(parameters): - self.report_issue('Incorrect Arity', {"position": position}) - # TODO: Handle special types of parameters - for arg, parameter in zip(args, parameters): - name = arg.arg - if arg.annotation: - self.visit(arg.annotation) - annotation = get_tifa_type(arg.annotation, self) - # TODO: Use parameter information to "fill in" empty lists - if isinstance(parameter, ListType) and isinstance(annotation, ListType): - if isinstance(parameter.subtype, UnknownType): - parameter.subtype = annotation.subtype - # TODO: Check that arg.type and parameter type match! 
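                    # Illustration of a mismatch that the check below reports as
                    # "Parameter Type Mismatch" (hypothetical student code):
                    #     def count_words(text: str) -> int:
                    #         return len(text.split())
                    #     count_words([1, 2, 3])  # annotation says str, argument is a list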
- if not are_types_equal(annotation, parameter, True): - self.report_issue("Parameter Type Mismatch", - {"parameter": annotation, "parameter_name": name, - "argument": parameter}) - if parameter is not None: - parameter = parameter.clone_mutably() - self.store_variable(name, parameter, position) - if len(args) < len(parameters): - for undefined_parameter in parameters[len(args):]: - self.store_variable(name, UnknownType(), position) - self.visit_statements(node.body) - return_state = self.find_variable_scope("*return") - return_value = NoneType() - # TODO: Figure out if we are not returning something when we should - # If the pseudo variable exists, we load it and get its type - if return_state.exists and return_state.in_scope: - return_state = self.load_variable("*return", call_position) - return_value = return_state.type - if node.returns: - # self.visit(node.returns) - returns = get_tifa_type(node.returns, self) - if not are_types_equal(return_value, returns, True): - self.report_issue("Multiple Return Types", - {"expected": returns.precise_description(), - "actual": return_value.precise_description(), - "position": return_state.position}) - return return_value - - function = FunctionType(definition=definition, name=function_name) - self.store_variable(function_name, function) - return function - - def visit_GeneratorExp(self, node): - # TODO: Handle comprehension scope - for generator in node.generators: - self.visit(generator) - return GeneratorType(self.visit(node.elt)) - - def visit_If(self, node): - # Visit the conditional - self.visit(node.test) - - if len(node.orelse) == 1 and isinstance(node.orelse[0], ast.Pass): - self.report_issue("Malformed Conditional") - elif len(node.body) == 1 and isinstance(node.body[0], ast.Pass): - if node.orelse: - self.report_issue("Malformed Conditional") - - # Visit the bodies - this_path_id = self.path_chain[0] - if_path = Tifa.NewPath(self, this_path_id, "i") - with if_path: - for statement in node.body: - self.visit(statement) - else_path = Tifa.NewPath(self, this_path_id, "e") - with else_path: - for statement in node.orelse: - self.visit(statement) - - # Combine two paths into one - # Check for any names that are on the IF path - self.merge_paths(this_path_id, if_path.id, else_path.id) - - def visit_IfExp(self, node): - # Visit the conditional - self.visit(node.test) - - # Visit the body - body = self.visit(node.body) - - # Visit the orelse - orelse = self.visit(node.orelse) - - if are_types_equal(body, orelse): - return body - - # TODO: Union type? 
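        # Illustration (hypothetical): in `x = 5 if flag else "five"` the two
        # branches evaluate to NumType and StrType, so the expression falls
        # through to UnknownType below.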
- return UnknownType() - - def visit_Import(self, node): - # Handle names - for alias in node.names: - asname = alias.asname or alias.name - module_type = self.load_module(alias.name) - self.store_variable(asname, module_type) - - def visit_ImportFrom(self, node): - # Handle names - for alias in node.names: - if node.module is None: - asname = alias.asname or alias.name - module_type = self.load_module(alias.name) - else: - module_name = node.module - asname = alias.asname or alias.name - module_type = self.load_module(module_name) - name_type = module_type.load_attr(alias.name, self, - callee_position=self.locate()) - self.store_variable(asname, name_type) - - def visit_Lambda(self, node): - # Name - position = self.locate() - definitions_scope = self.scope_chain[:] - - def definition(tifa, call_type, call_name, parameters, call_position): - function_scope = Tifa.NewScope(self, definitions_scope) - with function_scope: - # Process arguments - args = node.args.args - if len(args) != len(parameters): - self.report_issue('Incorrect Arity', {"position": position}) - # TODO: Handle special types of parameters - for arg, parameter in zip(args, parameters): - name = arg.arg - if parameter is not None: - parameter = parameter.clone_mutably() - self.store_variable(name, parameter, position) - if len(args) < len(parameters): - for undefined_parameter in parameters[len(args):]: - self.store_variable(name, UnknownType(), position) - return_value = self.visit(node.body) - return return_value - - return FunctionType(definition=definition) - - def visit_List(self, node): - type = ListType() - if node.elts: - type.empty = False - # TODO: confirm homogenous subtype - for elt in node.elts: - type.subtype = self.visit(elt) - else: - type.empty = True - return type - - def visit_ListComp(self, node): - # TODO: Handle comprehension scope - for generator in node.generators: - self.visit(generator) - return ListType(self.visit(node.elt)) - - def visit_NameConstant(self, node): - value = node.value - if isinstance(value, bool): - return BoolType() - else: - return NoneType() - - def visit_Name(self, node): - name = node.id - if name == "___": - self.report_issue("Unconnected blocks") - if isinstance(node.ctx, ast.Load): - if name == "True" or name == "False": - return BoolType() - elif name == "None": - return NoneType() - else: - variable = self.find_variable_scope(name) - builtin = get_builtin_function(name) - if not variable.exists and builtin: - return builtin - else: - state = self.load_variable(name) - return state.type - else: - variable = self.find_variable_scope(name) - if variable.exists: - return variable.state.type - else: - return UnknownType() - - def visit_Num(self, node): - return NumType() - - def visit_Return(self, node): - if len(self.scope_chain) == 1: - self.report_issue("Return outside function") - # TODO: Unconditional return inside loop - if node.value is not None: - self.return_variable(self.visit(node.value)) - else: - self.return_variable(NoneType()) - - def visit_SetComp(self, node): - # TODO: Handle comprehension scope - for generator in node.generators: - self.visit(generator) - return SetType(self.visit(node.elt)) - - def visit_statements(self, nodes): - # TODO: Check for pass in the middle of a series of statement - if any(isinstance(node, ast.Pass) for node in nodes): - pass - return [self.visit(statement) for statement in nodes] - - def visit_Str(self, node): - if node.s == "": - return StrType(True) - else: - return StrType(False) - - def visit_Subscript(self, node): - # Handle 
value - value_type = self.visit(node.value) - # Handle slice - if isinstance(node.slice, ast.Index): - literal = self.get_literal(node.slice.value) - if literal is None: - dynamic_literal = type_to_literal(self.visit(node.slice.value)) - return value_type.index(dynamic_literal) - else: - return value_type.index(literal) - elif isinstance(node.slice, ast.Slice): - if node.slice.lower is not None: - self.visit(node.slice.lower) - if node.slice.upper is not None: - self.visit(node.slice.upper) - if node.slice.step is not None: - self.visit(node.slice.step) - return value_type - - def visit_Tuple(self, node): - type = TupleType() - if not node.elts: - type.empty = True - type.subtypes = [] - else: - type.empty = False - # TODO: confirm homogenous subtype - type.subtypes = [self.visit(elt) for elt in node.elts] - return type - - def visit_UnaryOp(self, node): - # Handle operand - operand = self.visit(node.operand) - - if isinstance(node.op, ast.Not): - return BoolType() - elif isinstance(operand, UnknownType): - return UnknownType() - elif type(node.op) in VALID_UNARYOP_TYPES: - op_lookup = VALID_UNARYOP_TYPES[type(node.op)] - if type(operand) in op_lookup: - return op_lookup[type(operand)]() - return UnknownType() - - def visit_While(self, node): - # Visit conditional - self.visit(node.test) - - # Visit the bodies - this_path_id = self.path_id - # One path is that we never enter the body - empty_path = Tifa.NewPath(self, this_path_id, "e") - with empty_path: - pass - # Another path is that we loop through the body and check the test again - body_path = Tifa.NewPath(self, this_path_id, "w") - with body_path: - for statement in node.body: - self.visit(statement) - # Revisit conditional - self.visit(node.test) - # If there's else bodies (WEIRD) then we should check them afterwards - if node.orelse: - self.report_issue("Else on loop body") - for statement in node.orelse: - self.visit(statement) - - # Combine two paths into one - # Check for any names that are on the IF path - self.merge_paths(this_path_id, body_path.id, empty_path.id) - - def visit_With(self, node): - for item in node.items: - type_value = self.visit(item.context_expr) - self.visit(item.optional_vars) - self._walk_target(item.optional_vars, type_value) - # Handle the bodies - self.visit_statements(node.body) - - def _scope_chain_str(self, name=None): - """ - Convert the current scope chain to a string representation (divided - by "/"). - - Returns: - str: String representation of the scope chain. - """ - if name: - return "/".join(map(str, self.scope_chain)) + "/" + name - else: - return "/".join(map(str, self.scope_chain)) - - def identify_caller(self, node): - """ - Figures out the variable that was used to kick off this call, - which is almost always the relevant Name to track as being updated. - If the origin wasn't a Name, nothing will need to be updated so None - is returned instead. - - TODO: Is this sufficient? - - Args: - node (AST): An AST node - Returns: - str or None: The name of the variable or None if no origin could - be found. - """ - if isinstance(node, ast.Name): - return node.id - elif isinstance(node, ast.Call): - return self.identify_caller(node.func) - elif isinstance(node, (ast.Attribute, ast.Subscript)): - return self.identify_caller(node.value) - return None - - def iterate_variable(self, name, position=None): - """ - Update the variable by iterating through it - this doesn't do anything - fancy yet. 
- """ - return self.load_variable(name, position) - - def store_iter_variable(self, name, type, position=None): - state = self.store_variable(name, type, position) - state.read = 'yes' - return state - - def return_variable(self, type): - - return self.store_variable("*return", type) - - def append_variable(self, name, type, position=None): - return self.store_variable(name, type, position) - - def store_variable(self, name, type, position=None): - """ - Update the variable with the given name to now have the new type. - - Args: - name (str): The unqualified name of the variable. The variable will - be assumed to be in the current scope. - type (Type): The new type of this variable. - Returns: - State: The new state of the variable. - """ - if position is None: - position = self.locate() - full_name = self._scope_chain_str(name) - current_path = self.path_chain[0] - variable = self.find_variable_scope(name) - if not variable.exists: - # Create a new instance of the variable on the current path - new_state = State(name, [], type, 'store', position, - read='no', set='yes', over='no') - self.name_map[current_path][full_name] = new_state - else: - new_state = self.trace_state(variable.state, "store", position) - if not variable.in_scope: - self.report_issue("Write out of scope", {'name': name}) - # Type change? - if not are_types_equal(type, variable.state.type): - self.report_issue("Type changes", - {'name': name, 'old': variable.state.type, - 'new': type, 'position': position}) - new_state.type = type - # Overwritten? - if variable.state.set == 'yes' and variable.state.read == 'no': - new_state.over_position = position - new_state.over = 'yes' - else: - new_state.set = 'yes' - new_state.read = 'no' - self.name_map[current_path][full_name] = new_state - # If this is a class scope... - current_scope = self.scope_chain[0] - if current_scope in self.class_scopes: - self.class_scopes[current_scope].add_attr(name, new_state.type) - return new_state - - def load_variable(self, name, position=None): - """ - Retrieve the variable with the given name. - - Args: - name (str): The unqualified name of the variable. If the variable is - not found in the current scope or an enclosing sope, all - other scopes will be searched to see if it was read out - of scope. - Returns: - State: The current state of the variable. - """ - full_name = self._scope_chain_str(name) - current_path = self.path_chain[0] - variable = self.find_variable_scope(name) - if position is None: - position = self.locate() - if not variable.exists: - out_of_scope_var = self.find_variable_out_of_scope(name) - # Create a new instance of the variable on the current path - if out_of_scope_var.exists: - self.report_issue("Read out of scope", {'name': name}) - else: - self.report_issue("Initialization Problem", {'name': name}) - new_state = State(name, [], UnknownType(), 'load', position, - read='yes', set='no', over='no') - self.name_map[current_path][full_name] = new_state - else: - new_state = self.trace_state(variable.state, "load", position) - if variable.state.set == 'no': - self.report_issue("Initialization Problem", {'name': name}) - if variable.state.set == 'maybe': - self.report_issue("Possible Initialization Problem", {'name': name}) - new_state.read = 'yes' - if not variable.in_scope: - self.name_map[current_path][variable.scoped_name] = new_state - else: - self.name_map[current_path][full_name] = new_state - return new_state - - def load_module(self, chain): - """ - Finds the module in the set of available modules. 
- - Args: - chain (str): A chain of module imports (e.g., "matplotlib.pyplot") - Returns: - ModuleType: The specific module with its members, or an empty - module type. - """ - module_names = chain.split('.') - potential_module = get_builtin_module(module_names[0]) - if potential_module is not None: - base_module = potential_module - for module in module_names: - if (isinstance(base_module, ModuleType) and - module in base_module.submodules): - base_module = base_module.submodules[module] - else: - self.report_issue("Module not found", {"name": chain}) - return base_module - else: - try: - actual_module = __import__(chain, globals(), {}, - ['_tifa_definitions']) - definitions = actual_module._tifa_definitions() - return type_from_json(definitions) - except Exception as e: - self.report_issue("Module not found", - {"name": chain, "error": str(e)}) - return ModuleType() - - def combine_states(self, left, right): - state = State(left.name, [left], left.type, 'branch', self.locate(), - read=left.read, set=left.set, over=left.over, - over_position=left.over_position) - if right is None: - state.read = 'no' if left.read == 'no' else 'maybe' - state.set = 'no' if left.set == 'no' else 'maybe' - state.over = 'no' if left.over == 'no' else 'maybe' - else: - if not are_types_equal(left.type, right.type): - self.report_issue("Type changes", {'name': left.name, - 'old': left.type, - 'new': right.type}) - state.read = Tifa.match_rso(left.read, right.read) - state.set = Tifa.match_rso(left.set, right.set) - state.over = Tifa.match_rso(left.over, right.over) - if left.over == 'no': - state.over_position = right.over_position - state.trace.append(right) - return state - - def merge_paths(self, parent_path_id, left_path_id, right_path_id): - """ - Combines any variables on the left and right path into the parent - name space. - - Args: - parent_path_id (int): The parent path of the left and right branches - left_path_id (int): One of the two paths - right_path_id (int): The other of the two paths. - """ - # Combine two paths into one - # Check for any names that are on the IF path - for left_name in self.name_map[left_path_id]: - left_state = self.name_map[left_path_id][left_name] - right_identifier = self.find_path_parent(right_path_id, left_name) - if right_identifier.exists: - # Was on both IF and ELSE path - right_state = right_identifier.state - else: - # Was only on IF path, potentially on the parent path - right_state = self.name_map[parent_path_id].get(left_name) - combined = self.combine_states(left_state, right_state) - self.name_map[parent_path_id][left_name] = combined - # Check for names that are on the ELSE path but not the IF path - for right_name in self.name_map[right_path_id]: - if right_name not in self.name_map[left_path_id]: - right_state = self.name_map[right_path_id][right_name] - # Potentially on the parent path - parent_state = self.name_map[parent_path_id].get(right_name) - combined = self.combine_states(right_state, parent_state) - self.name_map[parent_path_id][right_name] = combined - - def trace_state(self, state, method, position): - """ - Makes a copy of the given state with the given method type. - - Args: - state (State): The state to copy (as in, we trace a copy of it!) - method (str): The operation being applied to the state. - Returns: - State: The new State - """ - return state.copy(method, position) - - @staticmethod - def in_scope(full_name, scope_chain): - """ - Determine if the fully qualified variable name is in the given scope - chain. 
- - Args: - full_name (str): A fully qualified variable name - scope_chain (list): A representation of a scope chain. - Returns: - bool: Whether the variable lives in this scope - """ - # Get this entity's full scope chain - name_scopes = full_name.split("/")[:-1] - # against the reverse scope chain - checking_scopes = [str(s) for s in scope_chain[::-1]] - return name_scopes == checking_scopes - - @staticmethod - def match_rso(left, right): - if left == right: - return left - else: - return "maybe" - - def get_literal(self, node): - if isinstance(node, ast.Num): - return LiteralNum(node.n) - elif isinstance(node, ast.Str): - return LiteralStr(node.s) - elif isinstance(node, ast.Tuple): - values = [] - for elt in node.elts: - subvalue = self.get_literal(elt) - if subvalue is not None: - values.append(subvalue) - else: - return None - return LiteralTuple(values) - elif isinstance(node, ast.Name): - if node.id == "None": - return LiteralNone() - elif node.id == "False": - return LiteralBool(False) - elif node.id == "True": - return LiteralBool(True) - return None - - class NewPath: - """ - Context manager for entering and leaving execution paths (e.g., if - statements).) - - Args: - tifa (Tifa): The tifa instance, so we can modify some of its - properties that track variables and paths. - origin_path (int): The path ID parent to this one. - name (str): The symbolic name of this path, typically 'i' for an IF - body and 'e' for ELSE body. - - Fields: - id (int): The path ID of this path - """ - - def __init__(self, tifa, origin_path, name): - self.tifa = tifa - self.name = name - self.origin_path = origin_path - self.id = None - - def __enter__(self): - self.tifa.path_id += 1 - self.id = self.tifa.path_id - self.tifa.path_names.append(str(self.id) + self.name) - self.tifa.path_chain.insert(0, self.id) - self.tifa.name_map[self.id] = {} - self.tifa.path_parents[self.id] = self.origin_path - - def __exit__(self, type, value, traceback): - self.tifa.path_names.pop() - self.tifa.path_chain.pop(0) - - class NewScope: - """ - Context manager for entering and leaving scopes (e.g., inside of - function calls). - - Args: - tifa (Tifa): The tifa instance, so we can modify some of its - properties that track variables and paths. 
- definitions_scope_chain (list of int): The scope chain of the - definition - """ - - def __init__(self, tifa, definitions_scope_chain, class_type=None): - self.tifa = tifa - self.definitions_scope_chain = definitions_scope_chain - self.class_type = class_type - - def __enter__(self): - # Manage scope - self.old_scope = self.tifa.scope_chain[:] - # Move to the definition's scope chain - self.tifa.scope_chain = self.definitions_scope_chain[:] - # And then enter its body's new scope - self.tifa.scope_id += 1 - self.tifa.scope_chain.insert(0, self.tifa.scope_id) - # Register as class potentially - if self.class_type is not None: - self.class_type.scope_id = self.tifa.scope_id - self.tifa.class_scopes[self.tifa.scope_id] = self.class_type - - def __exit__(self, type, value, traceback): - # Finish up the scope - self.tifa._finish_scope() - # Leave the body - self.tifa.scope_chain.pop(0) - # Restore the scope - self.tifa.scope_chain = self.old_scope diff --git a/src/lib/pedal/tifa/type_definitions.py b/src/lib/pedal/tifa/type_definitions.py deleted file mode 100644 index 8c6d8ce2f6..0000000000 --- a/src/lib/pedal/tifa/type_definitions.py +++ /dev/null @@ -1,599 +0,0 @@ -import ast - - -def are_literals_equal(first, second): - if first is None or second is None: - return False - elif not isinstance(first, type(second)): - return False - else: - if isinstance(first, LiteralTuple): - if len(first.value) != len(second.value): - return False - for l, s in zip(first.value, second.value): - if not are_literals_equal(l, s): - return False - return True - elif not isinstance(first, LiteralValue): - return True - else: - return first.value == second.value - - -class LiteralValue: - """ - A special literal representation of a value, used to represent access on - certain container types. - """ - - def __init__(self, value): - self.value = value - - -class LiteralNum(LiteralValue): - """ - Used to capture indexes of containers. - """ - - def type(self): - return NumType() - - -class LiteralBool(LiteralValue): - def type(self): - return BoolType() - - -class LiteralStr(LiteralValue): - def type(self): - return StrType() - - -class LiteralTuple(LiteralValue): - def type(self): - return TupleType(self.value) - - -class LiteralNone(LiteralValue): - def type(self): - return LiteralNone() - - -def literal_from_json(val): - if val['type'] == 'LiteralStr': - return LiteralStr(val['value']) - elif val['type'] == 'LiteralNum': - return LiteralNum(val['value']) - elif val['type'] == 'LiteralBool': - return LiteralBool(val['value']) - - -def _dict_extends(d1, d2): - """ - Helper function to create a new dictionary with the contents of the two - given dictionaries. Does not modify either dictionary, and the values are - copied shallowly. If there are repeates, the second dictionary wins ties. - - The function is written to ensure Skulpt compatibility. - - Args: - d1 (dict): The first dictionary - d2 (dict): The second dictionary - """ - d3 = {} - for key, value in d1.items(): - d3[key] = value - for key, value in d2.items(): - d3[key] = value - return d3 - - -class Type: - """ - Parent class for all other types, used to provide a common interface. - - TODO: Handle more complicated object-oriented types and custom types - (classes). 
- """ - fields = {} - immutable = False - singular_name = 'a type' - - def clone(self): - return self.__class__() - - def __str__(self): - return str(self.__class__.__name__) - - def precise_description(self): - return self.singular_name - - def clone_mutably(self): - if self.immutable: - return self.clone() - else: - return self - - def index(self, i): - return self.clone() - - def load_attr(self, attr, tifa, callee=None, callee_position=None): - if attr in self.fields: - return self.fields[attr] - # TODO: Handle more kinds of common mistakes - if attr == "append": - tifa.report_issue('Append to non-list', - {'name': tifa.identify_caller(callee), - 'position': callee_position, 'type': self}) - return UnknownType() - - def is_empty(self): - return True - - def is_equal(self, other): - # TODO: Make this more sophisticated! - if type(self) not in TYPE_LOOKUPS: - return False - return other in TYPE_LOOKUPS[type(self)] - - def is_instance(self, other): - # TODO: Implement this correctly - return self.is_equal(other) - - -class UnknownType(Type): - """ - A special type used to indicate an unknowable type. - """ - - -class RecursedType(Type): - """ - A special type used as a placeholder for the result of a - recursive call that we have already process. This type will - be dominated by any actual types, but will not cause an issue. - """ - - -class FunctionType(Type): - """ - - Special values for `returns`: - identity: Returns the first argument's type - element: Returns the first argument's first element's type - void: Returns the NoneType - """ - singular_name = 'a function' - - def __init__(self, definition=None, name="*Anonymous", returns=None): - if returns is not None and definition is None: - if returns == 'identity': - def definition(ti, ty, na, args, ca): - if args: - return args[0].clone() - return UnknownType() - elif returns == 'element': - def definition(ti, ty, na, args, ca): - if args: - return args[0].index(0) - return UnknownType() - elif returns == 'void': - def definition(ti, ty, na, args, ca): - return NoneType() - else: - def definition(ti, ty, na, args, ca): - return returns.clone() - self.definition = definition - self.name = name - - -class ClassType(Type): - singular_name = 'a class' - - def __init__(self, name): - self.name = name - self.fields = {} - self.scope_id = None - - def add_attr(self, name, type): - self.fields[name] = type - - def get_constructor(self): - i = InstanceType(self) - return FunctionType(name='__init__', returns=i) - - def clone(self): - return ClassType(self.name) - - -class InstanceType(Type): - def __init__(self, parent): - self.parent = parent - self.fields = parent.fields - - def __str__(self): - return "InstanceTypeOf" + str(self.parent.name) - - def clone(self): - return InstanceType(self.parent) - - def add_attr(self, name, type): - # TODO: What if this is a type change? 
- self.parent.add_attr(name, type) - - -class NumType(Type): - singular_name = 'a number' - immutable = True - - def index(self, i): - return UnknownType() - - -class NoneType(Type): - singular_name = 'a None' - immutable = True - - -class BoolType(Type): - singular_name = 'a boolean' - immutable = True - - -class TupleType(Type): - """ - """ - singular_name = 'a tuple' - - def __init__(self, subtypes=None): - if subtypes is None: - subtypes = [] - self.subtypes = subtypes - - def index(self, i): - if isinstance(i, LiteralNum): - return self.subtypes[i.value].clone() - else: - return self.subtypes[i].clone() - - def clone(self): - return TupleType([t.clone() for t in self.subtypes]) - - immutable = True - - -class ListType(Type): - singular_name = 'a list' - - def __init__(self, subtype=None, empty=True): - if subtype is None: - subtype = UnknownType() - self.subtype = subtype - self.empty = empty - - def index(self, i): - return self.subtype.clone() - - def clone(self): - return ListType(self.subtype.clone(), self.empty) - - def load_attr(self, attr, tifa, callee=None, callee_position=None): - if attr == 'append': - def _append(tifa, function_type, callee, args, position): - if args: - cloned_type = ListType(subtype=args[0].clone(), - empty=False) - if callee: - tifa.append_variable(callee, cloned_type, position) - self.empty = False - self.subtype = args[0] - - return FunctionType(_append, 'append') - return Type.load_attr(self, attr, tifa, callee, callee_position) - - def is_empty(self): - return self.empty - - -class StrType(Type): - singular_name = 'a string' - - def __init__(self, empty=False): - self.empty = empty - - def index(self, i): - return StrType() - - def is_empty(self): - return self.empty - - fields = _dict_extends(Type.fields, {}) - immutable = True - - -StrType.fields.update({ - # Methods that return strings - "capitalize": FunctionType(name='capitalize', returns=StrType()), - "center": FunctionType(name='center', returns=StrType()), - "expandtabs": FunctionType(name='expandtabs', returns=StrType()), - "join": FunctionType(name='join', returns=StrType()), - "ljust": FunctionType(name='ljust', returns=StrType()), - "lower": FunctionType(name='lower', returns=StrType()), - "lstrip": FunctionType(name='lstrip', returns=StrType()), - "replace": FunctionType(name='replace', returns=StrType()), - "rjust": FunctionType(name='rjust', returns=StrType()), - "rstrip": FunctionType(name='rstrip', returns=StrType()), - "strip": FunctionType(name='strip', returns=StrType()), - "swapcase": FunctionType(name='swapcase', returns=StrType()), - "title": FunctionType(name='title', returns=StrType()), - "translate": FunctionType(name='translate', returns=StrType()), - "upper": FunctionType(name='upper', returns=StrType()), - "zfill": FunctionType(name='zfill', returns=StrType()), - # Methods that return numbers - "count": FunctionType(name='count', returns=NumType()), - "find": FunctionType(name='find', returns=NumType()), - "rfind": FunctionType(name='rfind', returns=NumType()), - "index": FunctionType(name='index', returns=NumType()), - "rindex": FunctionType(name='rindex', returns=NumType()), - # Methods that return booleans - "startswith": FunctionType(name='startswith', returns=BoolType()), - "endswith": FunctionType(name='endswith', returns=BoolType()), - "isalnum": FunctionType(name='isalnum', returns=BoolType()), - "isalpha": FunctionType(name='isalpha', returns=BoolType()), - "isdigit": FunctionType(name='isdigit', returns=BoolType()), - "islower": FunctionType(name='islower', 
returns=BoolType()), - "isspace": FunctionType(name='isspace', returns=BoolType()), - "istitle": FunctionType(name='istitle', returns=BoolType()), - "isupper": FunctionType(name='isupper', returns=BoolType()), - # Methods that return List of Strings - "rsplit": FunctionType(name='rsplit', returns=ListType(StrType(), empty=False)), - "split": FunctionType(name='split', returns=ListType(StrType(), empty=False)), - "splitlines": FunctionType(name='splitlines', returns=ListType(StrType(), empty=False)) -}) - - -class FileType(Type): - singular_name = 'a file' - - def index(self, i): - return StrType() - - fields = _dict_extends(Type.fields, { - 'close': FunctionType(name='close', returns='void'), - 'read': FunctionType(name='read', returns=StrType()), - 'readlines': FunctionType(name='readlines', returns=ListType(StrType(), False)) - }) - - def is_empty(self): - return False - - -class DictType(Type): - singular_name = 'a dictionary' - - def precise_description(self): - base = "a dictionary" - if self.literals: - base += " mapping " - # TODO: Handle recursive precise names more correctly - base += ", ".join("{!r} to {}".format(l.value, r.precise_description()) - for l, r in zip(self.literals, self.values)) - elif self.keys: - keys = self.keys[0] if isinstance(self.keys, list) else self.keys - values = self.values[0] if isinstance(self.values, list) else self.values - base += " mapping {}".format(keys.precise_description()) - base += " to {}".format(values.precise_description()) - return base - - def __init__(self, empty=False, literals=None, keys=None, values=None): - self.empty = empty - self.literals = literals - self.values = values - self.keys = keys - - def clone(self): - return DictType(self.empty, self.literals, self.keys, self.values) - - def is_empty(self): - return self.empty - - def has_literal(self, l): - for literal, value in zip(self.literals, self.values): - if are_literals_equal(literal, l): - return value - return None - - def index(self, i): - if self.empty: - return UnknownType() - elif self.literals is not None: - for literal, value in zip(self.literals, self.values): - if are_literals_equal(literal, i): - return value.clone() - return UnknownType() - else: - return self.keys.clone() - - def update_key(self, literal_key, type): - self.literals.append(literal_key) - self.values.append(type) - - def load_attr(self, attr, tifa, callee=None, callee_position=None): - if attr == 'items': - def _items(tifa, function_type, callee, args, position): - if self.literals is None: - return ListType(TupleType([self.keys, self.values]), - empty=False) - else: - return ListType(TupleType([self.literals[0].type(), - self.values[0]]), - empty=False) - - return FunctionType(_items, 'items') - elif attr == 'keys': - def _keys(tifa, function_type, callee, args, position): - if self.literals is None: - return ListType(self.keys, empty=False) - else: - return ListType(self.literals[0].type(), empty=False) - - return FunctionType(_keys, 'keys') - elif attr == 'values': - def _values(tifa, function_type, callee, args, position): - if self.literals is None: - return ListType(self.values, empty=False) - else: - return ListType(self.values[0], empty=False) - - return FunctionType(_values, 'values') - return Type.load_attr(self, attr, tifa, callee, callee_position) - - -class ModuleType(Type): - singular_name = 'a module' - - def __init__(self, name="*UnknownModule", submodules=None, fields=None): - self.name = name - if submodules is None: - submodules = {} - self.submodules = submodules - if fields is 
None: - fields = {} - self.fields = fields - - -class SetType(ListType): - singular_name = 'a set' - - -class GeneratorType(ListType): - singular_name = 'a generator' - - -# Custom parking class in blockpy - -class TimeType(Type): - singular_name = 'a time of day' - - -class DayType(Type): - singular_name = 'a day of the week' - - -try: - from numbers import Number -except Exception: - Number = int - -TYPE_LOOKUPS = { - FunctionType: ('function', FunctionType, 'FunctionType'), - ClassType: ('class', ClassType, 'ClassType'), - InstanceType: ('instance', InstanceType, 'InstanceType'), - NumType: ('num', int, float, complex, NumType, Number, 'NumType'), - BoolType: ('bool', bool, BoolType, 'BoolType'), - NoneType: ('None', None, NoneType, 'NoneType'), - TupleType: ('tuple', tuple, TupleType, 'TupleType'), - ListType: ('list', list, ListType, 'ListType'), - StrType: ('str', str, StrType, 'StrType'), - FileType: ('file', FileType, 'FileType'), - DictType: ('dict', dict, DictType, 'DictType'), - SetType: ('set', set, SetType, 'SetType'), -} - - -def type_from_json(val): - if val['type'] == 'DictType': - values = [type_from_json(v) for v in val['values']] - empty = val.get('empty', None) - if 'literals' in val: - literals = [literal_from_json(l) for l in val['literals']] - return DictType(empty, literals=literals, values=values) - else: - keys = [type_from_json(k) for k in val['keys']] - return DictType(empty, keys=keys, values=values) - elif val['type'] == 'ListType': - return ListType(type_from_json(val.get('subtype', None)), - val.get('empty', None)) - elif val['type'] == 'StrType': - return StrType(val.get('empty', None)) - elif val['type'] == 'BoolType': - return BoolType() - elif val['type'] == 'NoneType': - return NoneType() - elif val['type'] == 'NumType': - return NumType() - elif val['type'] == 'ModuleType': - submodules = {name: type_from_json(m) - for name, m in val.get('submodules', {}).items()} - fields = {name: type_from_json(m) - for name, m in val.get('fields', {}).items()} - return ModuleType(name=val.get('name'), submodules=submodules, - fields=fields) - elif val['type'] == 'FunctionType': - returns = type_from_json(val.get('returns', {'type': 'NoneType'})) - return FunctionType(name=val.get('name'), returns=returns) - - -def type_to_literal(type): - if isinstance(type, NumType): - return LiteralNum(0) - elif isinstance(type, StrType): - return LiteralStr("") - else: - # TODO: Finish the mapping - return LiteralStr("") - - -TYPE_STRINGS = { - "str": StrType, "string": StrType, - "num": NumType, "number": NumType, "int": NumType, "integer": NumType, "float": NumType, - "complex": NumType, - "bool": BoolType, "boolean": BoolType, - "none": NoneType, - "dict": DictType, "dictionary": DictType, - "list": ListType, - "tuple": TupleType, - "set": SetType, - "file": FileType, - "func": FunctionType, "function": FunctionType, - "class": ClassType, -} - - -def get_tifa_type_from_str(value, self): - # if value in custom_types: - # return custom_types[value] - if value.lower() in TYPE_STRINGS: - return TYPE_STRINGS[value.lower()]() - else: - variable = self.find_variable_scope(value) - if variable.exists: - state = self.load_variable(value) - return state.type - # custom_types.add(value) - return UnknownType() - # TODO: handle custom types - - -def get_tifa_type(v, self): - if isinstance(v, ast.Str): - return get_tifa_type_from_str(v.s, self) - elif isinstance(v, ast.Name): - return get_tifa_type_from_str(v.id, self) - elif isinstance(v, ast.List): - elements = v.elts - if elements: - 
return ListType(subtype=get_tifa_type(elements[0], self)) - else: - return ListType(empty=True) - elif isinstance(v, ast.Dict): - if not v.keys: - return DictType(empty=True) - if all(isinstance(k, ast.Str) for k in v.keys): - return DictType(literals=[LiteralStr(s.s) for s in v.keys], - values=[get_tifa_type(vv, self) for vv in v.values]) - return DictType(keys=[get_tifa_type(k, self) for k in v.keys], - values=[get_tifa_type(vv, self) for vv in v.values]) - # TODO: Finish filling in static type system - else: - return UnknownType() diff --git a/src/lib/pedal/tifa/type_operations.py b/src/lib/pedal/tifa/type_operations.py deleted file mode 100644 index 5fef8fde5a..0000000000 --- a/src/lib/pedal/tifa/type_operations.py +++ /dev/null @@ -1,153 +0,0 @@ -import ast - -from pedal.tifa.type_definitions import (UnknownType, NumType, BoolType, - TupleType, ListType, StrType, - DictType, SetType, GeneratorType, - DayType, TimeType, FunctionType, TYPE_STRINGS) - - -def merge_types(left, right): - # TODO: Check that lists/sets have the same subtypes - if isinstance(left, (ListType, SetType, GeneratorType)): - if left.empty: - return right.subtype - else: - return left.subtype.clone() - elif isinstance(left, TupleType): - return left.subtypes + right.subtypes - - -def NumType_any(*x): - return NumType() - - -def StrType_any(*x): - return StrType() - - -def BoolType_any(*x): - return BoolType() - - -def keep_left(left, right): - return left - - -def keep_right(left, right): - return right - - -VALID_BINOP_TYPES = { - ast.Add: {NumType: {NumType: NumType_any}, - StrType: {StrType: StrType_any}, - ListType: {ListType: merge_types}, - TupleType: {TupleType: merge_types}}, - ast.Sub: {NumType: {NumType: NumType_any}, - SetType: {SetType: merge_types}}, - ast.Div: {NumType: {NumType: NumType_any}}, - ast.FloorDiv: {NumType: {NumType: NumType_any}}, - ast.Mult: {NumType: {NumType: NumType_any, - StrType: StrType_any, - ListType: keep_right, - TupleType: keep_right}, - StrType: {NumType: StrType_any}, - ListType: {NumType: keep_left}, - TupleType: {NumType: keep_left}}, - ast.Pow: {NumType: {NumType: NumType_any}}, - # TODO: Should we allow old-fashioned string interpolation? - # Currently, I vote no because it makes the code harder and is bad form. - ast.Mod: {NumType: {NumType: NumType_any}}, - ast.LShift: {NumType: {NumType: NumType_any}}, - ast.RShift: {NumType: {NumType: NumType_any}}, - ast.BitOr: {NumType: {NumType: NumType_any}, - BoolType: {NumType: NumType_any, - BoolType: BoolType_any}, - SetType: {SetType: merge_types}}, - ast.BitXor: {NumType: {NumType: NumType_any}, - BoolType: {NumType: NumType_any, - BoolType: BoolType_any}, - SetType: {SetType: merge_types}}, - ast.BitAnd: {NumType: {NumType: NumType_any}, - BoolType: {NumType: NumType_any, - BoolType: BoolType_any}, - SetType: {SetType: merge_types}} -} -VALID_UNARYOP_TYPES = { - ast.UAdd: {NumType: NumType}, - ast.USub: {NumType: NumType}, - ast.Invert: {NumType: NumType} -} - - -def are_types_equal(left, right, formal=False): - """ - Determine if two types are equal. - - This could be more Polymorphic - move the code for each type into - its respective class instead. - - Args: - formal (bool): Whether the left argument is formal, indicating that it can accept - type names. 
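        For illustration (hypothetical calls):
            are_types_equal(NumType(), StrType())                  -> False
            are_types_equal(ListType(NumType(), empty=False),
                            ListType(NumType(), empty=False))      -> True
            are_types_equal(ListType(empty=True),
                            ListType(StrType(), empty=False))      -> True  (an empty
                                                                    list matches any list)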
- """ - if left is None or right is None: - return False - elif isinstance(left, UnknownType) or isinstance(right, UnknownType): - return False - elif not isinstance(left, type(right)): - return False - elif isinstance(left, (GeneratorType, ListType)): - if left.empty or right.empty: - return True - else: - return are_types_equal(left.subtype, right.subtype) - elif isinstance(left, TupleType): - if left.empty or right.empty: - return True - elif len(left.subtypes) != len(right.subtypes): - return False - else: - for l, r in zip(left.subtypes, right.subtypes): - if not are_types_equal(l, r): - return False - return True - elif isinstance(left, DictType): - # print(left.empty, left.keys, left.literals, right) - if not left.keys and not left.literals: - return isinstance(right, DictType) - # print("L", [literal.value for literal in left.literals], [v.singular_name - # if not formal and not isinstance(v, FunctionType) - # else TYPE_STRINGS[v.name]().singular_name - # for v in left.values]) - # print("R", [literal.value for literal in right.literals], [v.singular_name for v in right.values]) - if left.empty or right.empty: - return True - elif left.literals is not None and right.literals is not None: - if len(left.literals) != len(right.literals): - return False - else: - for l, r in zip(left.literals, right.literals): - if not are_types_equal(l, r): - return False - for l, r in zip(left.values, right.values): - if formal: - if isinstance(l, FunctionType) and l.name in TYPE_STRINGS: - l = TYPE_STRINGS[l.name]() - if isinstance(r, FunctionType) and r.name in TYPE_STRINGS: - r = TYPE_STRINGS[r.name]() - if not are_types_equal(l, r): - return False - return True - elif left.literals is not None or right.literals is not None: - return False - else: - keys_equal = are_types_equal(left.keys, right.keys) - values_equal = are_types_equal(left.values, right.values) - return keys_equal and values_equal - else: - return True - - -ORDERABLE_TYPES = (NumType, BoolType, StrType, ListType, DayType, TimeType, - SetType, TupleType) -INDEXABLE_TYPES = (StrType, ListType, SetType, TupleType, DictType) diff --git a/src/lib/pedal/toolkit/__init__.py b/src/lib/pedal/toolkit/__init__.py deleted file mode 100644 index e69de29bb2..0000000000 diff --git a/src/lib/pedal/toolkit/files.py b/src/lib/pedal/toolkit/files.py deleted file mode 100644 index e8edf6c73f..0000000000 --- a/src/lib/pedal/toolkit/files.py +++ /dev/null @@ -1,56 +0,0 @@ -from pedal.cait.cait_api import parse_program -from pedal.report.imperative import explain -from pedal.toolkit.utilities import ensure_literal - - -def files_not_handled_correctly(*filenames): - """ - Statically detect if files have been opened and closed correctly. - This is only useful in the case of very simplistic file handling. - """ - if filenames and isinstance(filenames[0], int): - num_filenames = filenames[0] - actual_filenames = False - else: - num_filenames = len(filenames) - actual_filenames = True - ast = parse_program() - calls = ast.find_all("Call") - called_open = [] - closed = [] - for a_call in calls: - if a_call.func.ast_name == 'Name': - if a_call.func.id == 'open': - if not a_call.args: - explain("You have called the open function " - "without any arguments. 
It needs a filename.") - return True - called_open.append(a_call) - elif a_call.func.id == 'close': - explain("You have attempted to call close as a " - "function, but it is actually a method of the " - "file object.", 'verifier') - return True - elif a_call.func.ast_name == 'Attribute': - if a_call.func.attr == 'open': - explain("You have attempted to call open as a " - "method, but it is actually a built-in function.") - return True - elif a_call.func.attr == 'close': - closed.append(a_call) - if len(called_open) < num_filenames: - explain("You have not opened all the files you were supposed to.") - return True - elif len(called_open) > num_filenames: - explain("You have opened more files than you were supposed to.") - return True - withs = ast.find_all("With") - if len(withs) + len(closed) < num_filenames: - explain("You have not closed all the files you were supposed to.") - return True - elif len(withs) + len(closed) > num_filenames: - explain("You have closed more files than you were supposed to.") - return True - if actual_filenames: - return ensure_literal(*filenames) - return False diff --git a/src/lib/pedal/toolkit/functions.py b/src/lib/pedal/toolkit/functions.py deleted file mode 100644 index 9dad896dfa..0000000000 --- a/src/lib/pedal/toolkit/functions.py +++ /dev/null @@ -1,401 +0,0 @@ -from pedal.cait.cait_api import parse_program -from pedal.report.imperative import gently, explain, gently_r, explain_r, MAIN_REPORT -from pedal.sandbox import compatibility -import ast - -from pedal.toolkit.signatures import type_check, parse_type, normalize_type, parse_type_value, test_type_equality - -DELTA = 0.001 - - -def all_documented(): - ast = parse_program() - defs = ast.find_all('FunctionDef') + ast.find_all("ClassDef") - for a_def in defs: - if a_def.name == "__init__": - continue - if (a_def.body and - (a_def.body[0].ast_name != "Expr" or - a_def.body[0].value.ast_name != "Str")): - if a_def.ast_name == 'FunctionDef': - gently("You have an undocumented function: " + a_def.name) - else: - gently("You have an undocumented class: " + a_def.name) - return False - return True - - -def get_arg_name(node): - name = node.id - if name is None: - return node.arg - else: - return name - - -def match_function(name, root=None): - if root is None: - ast = parse_program() - else: - ast = root - defs = ast.find_all('FunctionDef') - for a_def in defs: - if a_def._name == name: - return a_def - return None - - -def match_signature_muted(name, length, *parameters): - ast = parse_program() - defs = ast.find_all('FunctionDef') - for a_def in defs: - if a_def._name == name: - found_length = len(a_def.args.args) - if found_length != length: - return None - elif parameters: - for parameter, arg in zip(parameters, a_def.args.args): - arg_name = get_arg_name(arg) - if arg_name != parameter: - return None - else: - return a_def - else: - return a_def - return None - - -def find_def_by_name(name, root=None): - if root is None: - root = parse_program() - defs = root.find_all('FunctionDef') - for a_def in defs: - if a_def._name == name: - return a_def - return None - - -def match_parameters(name, *types, returns=None, root=None): - defn = find_def_by_name(name, root) - if defn: - for expected, actual in zip(types, defn.args.args): - if actual.annotation: - expected = parse_type_value(expected, True) - actual_type = parse_type(actual.annotation) - if not test_type_equality(expected, actual_type): - gently_r("Error in definition of function `{}` parameter `{}`. 
Expected `{}`, " - "instead found `{}`.".format(name, actual.arg, expected, actual_type), - "wrong_parameter_type") - return None - else: - if returns is not None: - if not isinstance(returns, str): - returns = returns.__name__ - if defn.returns: - actual_type = parse_type(defn.returns) - if not type_check(returns, actual_type): - gently_r("Error in definition of function `{}` return type. Expected `{}`, " - "instead found {}.".format(name, returns, actual_type), - "wrong_return_type") - return None - else: - gently_r("Error in definition of function `{}` return type. Expected `{}`, " - "but there was no return type specified.".format(name, returns), - "missing_return_type") - return None - return defn - - -def match_signature(name, length, *parameters): - ast = parse_program() - defs = ast.find_all('FunctionDef') - for a_def in defs: - if a_def._name == name: - found_length = len(a_def.args.args) - if found_length < length: - gently_r("The function named {} has fewer parameters ({}) " - "than expected ({}). ".format(name, found_length, length), "insuff_args") - elif found_length > length: - gently_r("The function named {} has more parameters ({}) " - "than expected ({}). ".format(name, found_length, length), "excess_args") - elif parameters: - for parameter, arg in zip(parameters, a_def.args.args): - arg_name = get_arg_name(arg) - if arg_name != parameter: - gently_r("Error in definition of {}. Expected a parameter named {}, " - "instead found {}.".format(name, parameter, arg_name), "name_missing") - return None - else: - return a_def - else: - return a_def - else: - gently_r("No function named {name} was found.".format(name=name), - "missing_func_{name}".format(name=name)) - return None - - -TEST_TABLE_HEADER = "" -TEST_TABLE_OUTPUT = TEST_TABLE_HEADER + ( - "" -) -TEST_TABLE_UNITS = TEST_TABLE_HEADER + ( - "" -) -GREEN_CHECK = "" -RED_X = "" - - -def output_test(name, *tests): - student = compatibility.get_student_data() - if name in student.data: - the_function = student.data[name] - if callable(the_function): - result = TEST_TABLE_OUTPUT - success = True - success_count = 0 - for test in tests: - inp = test[:-1] - inputs = ', '.join(["{}".format(repr(i)) for i in inp]) - out = test[-1] - tip = "" - if isinstance(out, tuple): - tip = out[1] - out = out[0] - message = "" + ("" * 2) - test_out = compatibility.capture_output(the_function, *inp) - if isinstance(out, str): - if len(test_out) < 1: - message = message.format(inputs, repr(out), "No output", tip) - message = "" + RED_X + message + "" - if tip: - message += "" - success = False - elif len(test_out) > 1: - message = message.format(inputs, "\n".join(out), "Too many outputs", tip) - message = "" + RED_X + message + "" - if tip: - message += "" - success = False - elif out not in test_out: - message = message.format(inputs, "\n".join(out), "\n".join(test_out), tip) - message = "" + RED_X + message + "" - if tip: - message += "" - success = False - else: - message = message.format(inputs, "\n".join(out), "\n".join(test_out), tip) - message = "" + GREEN_CHECK + message + "" - success_count += 1 - elif out != test_out: - if len(test_out) < 1: - message = message.format(inputs, "\n".join(out), "No output", tip) - else: - message = message.format(inputs, "\n".join(out), "\n".join(test_out), tip) - message = "" + RED_X + message + "" - if tip: - message += "" - success = False - else: - message = message.format(inputs, "\n".join(out), "\n".join(test_out), tip) - message = "" + GREEN_CHECK + message + "" - success_count += 1 - result += 
message - if success: - return the_function - else: - result = ("I ran your function {} on some new arguments, and it gave the wrong output " - "{}/{} times.".format(name, len(tests) - success_count, len(tests)) + result) - gently_r(result + "
    ", "wrong_output") - return None - else: - gently_r("You defined {}, but did not define it as a function.".format(name), "not_func_def") - return None - else: - gently_r("The function {} was not defined.".format(name), "no_func_def") - return None - - -def unit_test(name, *tests): - """ - Show a table - :param name: - :param tests: - :return: - """ - student = compatibility.get_student_data() - if name in student.data: - the_function = student.data[name] - if callable(the_function): - result = TEST_TABLE_UNITS - success = True - success_count = 0 - for test in tests: - inp = test[:-1] - inputs = ', '.join(["{}".format(repr(i)) for i in inp]) - out = test[-1] - tip = "" - if isinstance(out, tuple): - tip = out[1] - out = out[0] - message = ("{}" * 3) - ran = True - try: - test_out = the_function(*inp) - except Exception as e: - message = message.format(inputs, str(e), repr(out)) - message = "" + RED_X + message + "" - success = False - ran = False - if not ran: - result += message - continue - message = message.format(inputs, repr(test_out), repr(out)) - if (isinstance(out, float) and - isinstance(test_out, (float, int)) and - abs(out - test_out) < DELTA): - message = "" + GREEN_CHECK + message + "" - success_count += 1 - elif out != test_out: - # gently(message) - message = "" + RED_X + message + "" - if tip: - message += "" + tip + "" - success = False - else: - message = "" + GREEN_CHECK + message + "" - success_count += 1 - result += message - if success: - return the_function - else: - result = "I ran your function {} on some new arguments, " \ - "and it failed {}/{} tests.".format(name, len(tests) - success_count, len(tests)) + result - gently_r(result + "", "tests_failed") - return None - else: - gently("You defined {}, but did not define it as a function.".format(name)) - return None - else: - gently("The function {} was not defined.".format(name)) - return None - - -class _LineVisitor(ast.NodeVisitor): - """ - NodeVisitor subclass that visits every statement of a program and tracks - their line numbers in a list. - - Attributes: - lines (list[int]): The list of lines that were visited. - """ - - def __init__(self): - self.lines = [] - - def _track_lines(self, node): - self.lines.append(node.lineno) - self.generic_visit(node) - - visit_FunctionDef = _track_lines - visit_AsyncFunctionDef = _track_lines - visit_ClassDef = _track_lines - visit_Return = _track_lines - visit_Delete = _track_lines - visit_Assign = _track_lines - visit_AugAssign = _track_lines - visit_AnnAssign = _track_lines - visit_For = _track_lines - visit_AsyncFor = _track_lines - visit_While = _track_lines - visit_If = _track_lines - visit_With = _track_lines - visit_AsyncWith = _track_lines - visit_Raise = _track_lines - visit_Try = _track_lines - visit_Assert = _track_lines - visit_Import = _track_lines - visit_ImportFrom = _track_lines - visit_Global = _track_lines - visit_Nonlocal = _track_lines - visit_Expr = _track_lines - visit_Pass = _track_lines - visit_Continue = _track_lines - visit_Break = _track_lines - - -def check_coverage(report=None): - """ - Checks that all the statements in the program have been executed. - This function only works when a tracer_style has been set in the sandbox, - or you are using an environment that automatically traces calls (e.g., - BlockPy). - - TODO: Make compatible with tracer_style='coverage' - - Args: - report (Report): The Report to draw source code from; if not given, - defaults to MAIN_REPORT. 
- Returns: - bool or set[int]: If the source file was not parsed, None is returned. - If there were fewer lines traced in execution than are found in - the AST, then the set of unexecuted lines are returned. Otherwise, - False is returned. - """ - if report is None: - report = MAIN_REPORT - if not report['source']['success']: - return None, 0 - lines_executed = set(compatibility.trace_lines()) - if -1 in lines_executed: - lines_executed.remove(-1) - student_ast = report['source']['ast'] - visitor = _LineVisitor() - visitor.visit(student_ast) - lines_in_code = set(visitor.lines) - if lines_executed < lines_in_code: - return lines_in_code - lines_executed, len(lines_executed) / len(lines_in_code) - else: - return False, 1 - - -def ensure_coverage(percentage=.5, destructive=False, report=None): - """ - Note that this avoids destroying the current sandbox instance stored on the - report, if there is one present. - - Args: - destructive (bool): Whether or not to remove the sandbox. - """ - if report is None: - report = MAIN_REPORT - student_code = report['source']['code'] - unexecuted_lines, percent_covered = check_coverage(report) - if unexecuted_lines: - if percent_covered <= percentage: - gently("Your code coverage is not adequate. You must cover at least half your code to receive feedback.") - return False - return True - - -def ensure_cisc108_tests(test_count, report=None): - student = compatibility.get_student_data() - if 'assert_equal' not in student.data: - gently("You have not imported assert_equal from the cisc108 module.") - return False - assert_equal = student.data['assert_equal'] - if not hasattr(assert_equal, 'student_tests'): - gently("The assert_equal function has been modified. Do not let it be overwritten!", - label="Assertion Function Corrupted") - return False - student_tests = assert_equal.student_tests - if student_tests.tests == 0: - gently("You are not unit testing the result.", label="No Student Unit Tests") - return False - elif student_tests.tests < test_count: - gently("You have not written enough unit tests.", label="Not Enough Student Unit Tests") - return False - elif student_tests.failures > 0: - gently("Your unit tests are not passing.", label="Student Unit Tests Failing") - return False - return True diff --git a/src/lib/pedal/toolkit/imports.py b/src/lib/pedal/toolkit/imports.py deleted file mode 100644 index a5e352ea97..0000000000 --- a/src/lib/pedal/toolkit/imports.py +++ /dev/null @@ -1,25 +0,0 @@ -from pedal.cait.cait_api import parse_program -from pedal.report.imperative import explain - - -def ensure_imports(*modules): - ast = parse_program() - for module in modules: - imports = ast.find_all("Import") - import_froms = ast.find_all("ImportFrom") - if not imports and not import_froms: - explain("You need to import the {} module.".format(module)) - return True - success = False - if imports: - if any(alias._name == module - for i in imports - for alias in i.names): - success = True - if import_froms: - if any(i.module == module for i in import_froms): - success = True - if not success: - explain("You need to import the {} module.".format(module)) - return True - return False diff --git a/src/lib/pedal/toolkit/plotting.py b/src/lib/pedal/toolkit/plotting.py deleted file mode 100644 index a7b4b726db..0000000000 --- a/src/lib/pedal/toolkit/plotting.py +++ /dev/null @@ -1,184 +0,0 @@ -from pedal.toolkit.utilities import function_is_called -from pedal.cait.cait_api import parse_program, def_use_error -from pedal.report.imperative import gently, explain_r, 
gently_r -from pedal.sandbox import compatibility - -PLOT_LABEL = {'plot': 'line plot', - 'hist': 'histogram', - 'scatter': 'scatter plot'} - - -def prevent_incorrect_plt(): - ast = parse_program() - plts = [n for n in ast.find_all("Name") if n.id == 'plt'] - if plts and def_use_error(plts[0]): - # TODO: I converted this to the explain_r function, but I wasn't sure about the priority thing ~Luke Gusukuma - # explain("You have imported the matplotlib.pyplot module, " - # "but you did not rename it to plt using " - # "import matplotlib.pyplot as plt.

    (plt_rename_err)
    ", 'verifier') - explain_r("You have imported the matplotlib.pyplot module, " - "but you did not rename it to plt using " - "import matplotlib.pyplot as plt.", - "plt_rename_err", - priority='verifier') - return True - matplotlib_names = ['plot', 'hist', 'scatter', - 'title', 'xlabel', 'ylabel', 'show'] - for name in matplotlib_names: - for n in ast.find_all("Name"): - if n.id == name: - if def_use_error(n): - # explain(("You have attempted to use the MatPlotLib " - # "function named {0}. However, you " - # "imported MatPlotLib in a way that does not " - # "allow you to use the function directly. I " - # "recommend you use plt.{0} instead, " - # "after you use import matplotlib.pyplot as " - # "plt.

    (plt_wrong_import)
    ").format(name), 'verifier') - explain_r(("You have attempted to use the MatPlotLib " - "function named {0}. However, you " - "imported MatPlotLib in a way that does not " - "allow you to use the function directly. I " - "recommend you use plt.{0} instead, " - "after you use import matplotlib.pyplot as " - "plt.").format(name), - "plt_wrong_import", - priority='verifier') - return True - return False - - -def ensure_correct_plot(function_name): - for a_plot, label in PLOT_LABEL.items(): - if function_name == a_plot: - if not function_is_called(function_name): - gently_r("You are not calling the {func_name} function.".format(func_name=function_name), - "no_{func_name}_call".format(func_name=function_name)) - return True - elif function_is_called(a_plot): - gently_r("You have called the {} function, which makes a {}.".format(a_plot, label), - "wrong_plt") - return True - return False - - -def ensure_show(): - if not function_is_called("show"): - gently_r("You have not called show function, which " - "actually creates the graph.", "no_show") - return True - return False - - -def compare_data(plt_type, correct, given): - """ - Determines whether the given data matches any of the data found in the - correct data. This handles plots of different types: if a histogram - was plotted with the expected data for a line plot, it will return True. - - Args: - plt_type (str): The expected type of this plot - correct (List of Int or List of List of Int): The expected data. - given (Dict): The actual plotted data and information - Returns: - bool: Whether the correct data was found in the given plot. - """ - # Infer arguments - if plt_type == 'hist': - correct_xs = None - correct_ys = correct - elif not correct: - correct_xs = [] - correct_ys = [] - elif isinstance(correct[0], (tuple, list)): - # We were given a list of lists of ints - correct_xs, correct_ys = correct - else: - # Assume it is a singular list - correct_xs = list(range(len(correct))) - correct_ys = correct - - if given['type'] == 'hist': - return correct_ys == given['values'] - elif plt_type == 'hist': - return correct_ys == given['y'] - else: - return correct_xs == given['x'] and correct_ys == given['y'] - - -GRAPH_TYPES = {'line': 'line plot', - 'hist': 'histogram', - 'scatter': 'scatter plot'} - - -def check_for_plot(plt_type, data): - """ - Returns any errors found for this plot type and data. - In other words, if it returns False, the plot was found correctly. - """ - if plt_type == 'plot': - plt_type = 'line' - type_found = False - data_found = False - for graph in compatibility.get_plots(): - for a_plot in graph['data']: - data_found_here = compare_data(plt_type, data, a_plot) - if a_plot['type'] == plt_type and data_found_here: - return False - if a_plot['type'] == plt_type: - type_found = True - if data_found_here: - data_found = True - plt_type = GRAPH_TYPES.get(plt_type, plt_type) - if type_found and data_found: - return ("You have created a {}, but it does not have the right data. That data appears to have been plotted " - "in another graph.

    (other_plt)
    ".format(plt_type)) - elif type_found: - return ("You have created a {}, but it does not have the right data." - "

    (wrong_plt_data)
    ".format(plt_type)) - elif data_found: - return ("You have plotted the right data, but you appear to have not plotted it as a {}." - "

    (wrong_plt_type)
    ".format(plt_type)) - else: - return ("You have not created a {} with the proper data." - "

    (no_plt)
    ".format(plt_type)) - - -def check_for_plot_r(plt_type, data): - """ - Returns any errors found for this plot type and data. - In other words, if it returns False, the plot was found correctly. - """ - if plt_type == 'plot': - plt_type = 'line' - type_found = False - data_found = False - for graph in compatibility.get_plots(): - for a_plot in graph['data']: - data_found_here = compare_data(plt_type, data, a_plot) - if a_plot['type'] == plt_type and data_found_here: - return False - if a_plot['type'] == plt_type: - type_found = True - if data_found_here: - data_found = True - plt_type = GRAPH_TYPES.get(plt_type, plt_type) - if type_found and data_found: - return {"message": "You have created a {}, but it does not have the right data. " - "That data appears to have been plotted in another graph.".format(plt_type), - "code": "other_plt", - "label": "Plotting Another Graph"} - elif type_found: - return {"message": "You have created a {}, but it does not have the right data.".format(plt_type), - "code": "wrong_plt_data", - "label": "Plot Data Incorrect"} - elif data_found: - return {"message": "You have plotted the right data, but you appear to have not plotted it as a {}.".format( - plt_type), - "code": "wrong_plt_type", - "label": "Wrong Plot Type" - } - else: - return {"message": "You have not created a {} with the proper data.".format(plt_type), - "code": "no_plt", - "label": "Missing Plot"} diff --git a/src/lib/pedal/toolkit/printing.py b/src/lib/pedal/toolkit/printing.py deleted file mode 100644 index a0191fc2dc..0000000000 --- a/src/lib/pedal/toolkit/printing.py +++ /dev/null @@ -1,22 +0,0 @@ -from pedal.report.imperative import gently_r -from pedal.toolkit.utilities import find_function_calls, is_top_level - - -def ensure_prints(count): - prints = find_function_calls('print') - if not prints: - gently_r("You are not using the print function!", "no_print", label="Missing Print") - return False - elif len(prints) > count: - gently_r("You are printing too many times!", "multiple_print", label="Too Many Prints") - return False - elif len(prints) < count: - gently_r("You are not printing enough things!", "too_few_print", label="Too Few Prints") - return False - else: - for a_print in prints: - if not is_top_level(a_print): - gently_r("You have a print function that is not at the top level. 
That is incorrect for this problem!", - "not_top_level_print", label="Non-Top Level Print") - return False - return prints diff --git a/src/lib/pedal/toolkit/records.py b/src/lib/pedal/toolkit/records.py deleted file mode 100644 index de58f567f1..0000000000 --- a/src/lib/pedal/toolkit/records.py +++ /dev/null @@ -1,32 +0,0 @@ -from pedal.report.imperative import gently, explain, gently_r, explain_r, MAIN_REPORT -from pedal.sandbox import compatibility - - -def check_record_instance(record_instance, record_type, instance_identifier, type_identifier): - if not isinstance(record_instance, dict): - explain("{} was not a {} because it is not a dictionary.".format(instance_identifier, type_identifier)) - return False - for expected_key, expected_value_type in record_type.items(): - if expected_key not in record_instance: - explain("{} was supposed to have the key `{}`, but it did not.".format(instance_identifier, expected_key)) - return False - actual_value = record_instance[expected_key] - # Handle nested record types - if isinstance(expected_value_type, list): - if not isinstance(actual_value, list): - explain("{} was not a {} because its key `{}` did not have a list.".format( - instance_identifier, type_identifier, expected_key - )) - return False - elif actual_value: - actual_value = actual_value[0] - expected_value_type = expected_value_type[0] - if not isinstance(actual_value, expected_value_type): - explain("{} was not a {} because its key `{}` did not have a `{}` value".format( - instance_identifier, type_identifier, expected_key, expected_value_type.__name__ - )) - return False - if len(record_type) != len(record_instance): - explain("{} had extra keys that it should not have.".format(instance_identifier)) - return False - return True diff --git a/src/lib/pedal/toolkit/signatures.py b/src/lib/pedal/toolkit/signatures.py deleted file mode 100644 index 94c0975dd9..0000000000 --- a/src/lib/pedal/toolkit/signatures.py +++ /dev/null @@ -1,426 +0,0 @@ -import ast -import re - -from pedal.cait.cait_api import parse_program -from pedal.cait.cait_node import CaitNode -from pedal.report.imperative import gently, explain - -""" -Verify indentation - -Format: - - -Any number of text. One final newline separates the next section. - -If line is "Args:" or "Returns:" - Next line will be a "param (type): Description" or "type: Description" - If the next line is indented more than current level, then it is part of the previous part's description. 
- Otherwise, new entry - -"Note:" - Any level of indentation indicates -""" - -PRIMITIVES = { - 'text': ['text'], - 'str': ['string', 'str', 'unicode'], - 'bytes': ['bytes'], - 'io': ['io'], - 'file': ['file'], - 'num': ['number', 'num', 'numeric'], - 'int': ['int', 'integer'], - 'float': ['float', 'floating'], - 'bool': ['bool', 'boolean'], - 'none': ['none'], - 'any': ['any'] -} -NORMALIZE_PRIMITIVES = {synonym: formal - for formal, synonyms in PRIMITIVES.items() - for synonym in synonyms} -CONTAINERS = { - 'list': (1, ['list']), - 'set': (1, ['set']), - 'optional': (1, ['optional', 'maybe']), - 'dict': (2, ['dict', 'dictionary']), - 'callable': (2, ['callable', 'function', 'func']), - 'union': ('*', ['union', 'itemization']), - 'tuple': ('*', ['tuple', 'pair']), -} -NORMALIZE_CONTAINERS = {synonym: formal - for formal, (length, synonyms) in CONTAINERS.items() - for synonym in synonyms} - -INHERITANCE = { - 'int': 'num', - 'float': 'num', - 'bool': 'num', - 'str': 'text', - 'bytes': 'text', - 'list': 'iterable', - 'tuple': 'iterable', - 'set': 'iterable', - 'dict': 'iterable', - 'file': 'iterable', - 'text': 'iterable' -} - -SPECIAL_PARAMETERS = ["_returns", "yields", "prints", "_raises", - "_report", "_root"] - -''' -Type validation: - Caps does not matter - Primitives: - Containers - Unions - X or Y - X, Y, or Z - X, Y, Z - Function - (X -> Y) - - list[int, str, or bool], dict[int: str], or bool or int -''' - - -def parse_type_slice(slice): - if slice.ast_name == "Index": - return parse_type(slice.value) - elif slice.ast_name == "Slice": - return "{}:{}".format(parse_type(slice.lower), parse_type(slice.upper)) - elif slice.ast_name == "ExtSlice": - return ", ".join(parse_type_slice(s) for s in slice.dims) - - -def parse_type(node): - if node == None: - return "Any" - if node.ast_name == "Str": - try: - return parse_type(ast.parse(node.s).body[0].value) - except: - return node.s - elif node.ast_name == "Name": - return node.id - elif node.ast_name == "NameConstant": - return node.value - elif node.ast_name == "List": - return "[{}]".format(", ".join([parse_type(n) for n in node.elts])) - elif node.ast_name == "Dict": - return "{" + (", ".join(["{}: {}".format(parse_type(k), parse_type(v)) - for k, v in zip(node.keys, node.values)])) + "}" - elif node.ast_name == "Subscript": - return parse_type(node.value) + "[{}]".format(parse_type_slice(node.slice)) - elif node.ast_name == "BoolOp": - if node.op.ast_name == "Or": - return " or ".join(parse_type(v) for v in node.values) - return "?" 
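For reference, a minimal sketch of how the type mini-language described above is intended to compare annotation strings, using the normalize_type and type_check helpers defined further down in this module. The asserts are hypothetical examples, not tests from this repository, and assume lowercase type spellings:

from pedal.toolkit.signatures import type_check

# Synonyms and extra whitespace normalize to the same internal Stack structure.
assert type_check("list[int]", "list[ integer ]")
# Unions written as "X or Y" are compared without regard to order.
assert type_check("int or str", "str or int")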
- - -def parse_type_value(value, parse_strings=False): - if isinstance(value, str): - if parse_strings: - return parse_type(CaitNode(ast.parse(value).body[0].value)) - else: - return repr(value) - elif value in (int, str, bool, float, list, dict, object): - return value.__name__ - elif value is None: - return "None" - elif isinstance(value, list): - if value == []: - return "[]" - else: - return "[{}]".format(parse_type_value(value[0])) - elif isinstance(value, tuple): - if value == (): - return "()" - else: - return "({})".format("".join(["{}, ".format(parse_type_value(v)) - for v in value])) - elif isinstance(value, dict): - if value == {}: - return "{}" - else: - return "{" + (", ".join(["{}: {}".format(parse_type_value(k), parse_type_value(v)) - for k, v in value.items()])) + "}" - - -def test_type_equality(left, right): - return left == right - - -class SignatureException(Exception): - pass - - -class Stack: - def __init__(self, identifier="union"): - self.body = [] - self.identifier = identifier - - def append(self, value): - self.body.append(value) - - def __repr__(self): - return "{}[{}]".format(self.identifier, ", ".join(map(repr, self.body))) - - def __hash__(self): - return hash(tuple(self.identifier, self.body)) - - def __lt__(self, other): - if isinstance(other, Stack): - return self.identifier < other.identifier and self.body < other.body - return self.identifier < other - - def __gt__(self, other): - if isinstance(other, Stack): - return self.identifier > other.identifier and self.body > other.body - return self.identifier > other - - def __eq__(self, other): - if isinstance(other, Stack): - return self.identifier == other.identifier and self.body == other.body - return False - - -def _normalize_identifier(identifier): - if identifier in NORMALIZE_PRIMITIVES: - return NORMALIZE_PRIMITIVES[identifier] - elif identifier in NORMALIZE_CONTAINERS: - return NORMALIZE_CONTAINERS[identifier] - else: - return identifier - - -SPECIAL_SYMBOLS = r"\s*(->|\s*[\[\],\(\)\:\{\}]|or)\s*" - - -def _parse_tokens(tokens): - result_stack = [Stack()] - tokens = list(reversed(list(tokens))) - while tokens: - current = tokens.pop() - # Ending a parenthetical, better stop here. - if current == ")": - subexpression = result_stack.pop() - result_stack[-1].append(subexpression) - # Ending a square bracket, better stop here. - elif current == "]": - subexpression = result_stack.pop() - result_stack[-1].append(subexpression) - # Ending a curly bracket, better stop here. - elif current == "}": - subexpression = result_stack.pop() - result_stack[-1].append(subexpression) - # We've reached the last token! - elif not tokens: - # And had no tokens before this one - # Return the set of tokens - result_stack[-1].append(_normalize_identifier(current)) - # Starting a parentheized expression - elif current == "(": - result_stack.append(Stack()) - elif current == "[": - result_stack.append(Stack("list")) - elif current == "{": - result_stack.append(Stack("dict")) - # Nullary function - elif current == "->": - result_stack[-1].append(Stack("callable")) - elif current in ("or", ",", ":"): - pass - else: - next = tokens.pop() - # X or ... - if current == "," and next == "or": - tokens.append(next) - if next in ("or", ",", "->", ":"): - result_stack[-1].append(_normalize_identifier(current)) - # X [ ... 
- elif next == "[": - result_stack.append(Stack(_normalize_identifier(current))) - else: - tokens.append(next) - result_stack[-1].append(_normalize_identifier(current)) - return result_stack.pop() - - -def sort_stacks(s): - if isinstance(s, Stack): - return (True, (s.identifier, s.body)) - return (False, s) - - -def normalize_type(t): - t = t.strip() - tokens = re.split(SPECIAL_SYMBOLS, t) - tokens = [token for token in tokens if token] - parsed = _parse_tokens(tokens) - return parsed - - -def check_piece(left, right, indent=1): - if type(left) != type(right): - return False - elif isinstance(left, Stack): - if left.identifier != right.identifier: - return False - elif len(left.body) != len(right.body): - return False - elif left.identifier == "union": - # Handle them in any order - left.body.sort(key=sort_stacks) - right.body.sort(key=sort_stacks) - # Match them in exact order - for l, r in zip(left.body, right.body): - if not check_piece(l, r, indent=indent + 1): - return False - return True - else: - return left == right - - -def type_check(left, right): - left = normalize_type(left) - right = normalize_type(right) - return check_piece(left, right) - - -def find_colon(str): - parens_stack = [] - for i, character in enumerate(str): - if character in '[(': - parens_stack.append(character) - elif character in '])': - parens_stack.pop() - elif character == ':' and not parens_stack: - return i - return 0 - - -ARGS = ('args:', 'arg:', 'argument:', 'arguments:', - 'parameters:', 'params:', 'parameter:', 'param:') -ARG_PATTERN = r'(.+)\s*\((.+)\)\s*:(.+)' -RETURNS = ('returns:', 'return:') - - -def parse_docstring(doc): - # First line's indentation may be different from rest - trust first - # non empty line after the first one. - # Remove taht number of spaces from subsequent lines - # If Line is "Args:" or other special... - # - lines = doc.split("\n") - body = [lines[0]] - args = {} - current_arg = None - returns = [] - current_component = 'body' - indentation = None - inner_indentation = None - for line in lines[1:]: - # Blank line, not interesting! - if not line.strip(): - continue - # Get the actual text - if indentation is None: - indentation = len(line) - len(line.lstrip()) - line = line[indentation:] - potential_command = line.lower().strip() - # New command region? - if potential_command in ARGS: - current_component = 'args' - inner_indentation = None - continue - elif potential_command in RETURNS: - current_component = 'returns' - inner_indentation = None - continue - # Okay, it's content - let's process it - if current_component == 'body': - body.append(line) - else: - if inner_indentation is None: - inner_indentation = len(line) - len(line.lstrip()) - line = line[inner_indentation:] - # Skip indented lines - if not re.match(r'\s', line): - if current_component == 'args': - match = re.search(ARG_PATTERN, line) - current_arg = match.group(1) - type_str = match.group(2) - args[current_arg.strip()] = type_str.strip() - elif current_component == 'returns': - position = find_colon(line) - return_type, comment = line[:position], line[position:] - returns.append(return_type.strip()) - return body, args, ' or '.join(returns) - - -def function_signature(function_name, returns=None, yields=None, - prints=None, raises=None, report=None, root=None, - **kwargs): - """ - Determines whether the function with this signature is in the AST. 
- - TODO: Implement raises, prints, yields - """ - if root is None: - root = parse_program() - # If you encounter any special parameters with a "_", then fix their - # name. This allows for students to have parameters with the given name. - for special_parameter in SPECIAL_PARAMETERS: - if special_parameter in kwargs: - kwargs[special_parameter[1:]] = kwargs.pop(special_parameter) - # Go get the actual docstring, parse it - docstring = None - for function_def in root.find_all("FunctionDef"): - if function_def._name == function_name: - if function_def.body: - if (function_def.body[0].ast_name == "Expr" and - function_def.body[0].value.ast_name == "Str"): - docstring = function_def.body[0].value.s - # Try to match each element in turn. - if docstring is None: - return False - - try: - body, args, parsed_returns = parse_docstring(docstring) - except Exception as e: - return [e], False - failing_parameters = [] - for name, type in kwargs.items(): - if name in args: - if not type_check(type, args[name]): - failing_parameters.append(name) - else: - failing_parameters.append(name) - if returns is None and not returns: - return failing_parameters, True - elif returns is not None and returns: - return failing_parameters, type_check(parsed_returns, returns) - else: - return failing_parameters, False - - -def class_signature(class_name, report=None, root=None, **attributes): - """ - - Args: - class_name: - **attributes: - report: - root: - - Returns: - - """ - if root is None: - root = parse_program() - - -""" - -""" diff --git a/src/lib/pedal/toolkit/upload.py b/src/lib/pedal/toolkit/upload.py deleted file mode 100644 index 54993edd59..0000000000 --- a/src/lib/pedal/toolkit/upload.py +++ /dev/null @@ -1,54 +0,0 @@ -import re -from pedal.source import get_program -from pedal.sandbox.compatibility import get_output -from pedal.report.imperative import gently_r, explain_r - - -# Feedback for author's name -def check_author_name_on_header(): - code = get_program() - m_author = re.search('Author: \\w+', code) - if not m_author: - gently_r("You need to add your name to the author field at the top of the file.", "name_missing", - label="Missing Name") - - -def get_plots(output): - # The p[0] is the first plot in a graph/show - return [p[0] for p in output if isinstance(p[0], dict)] - - -def find_plot_of_type(plot_list, plot_type): - return [p['data'] for p in plot_list if p['type'] == plot_type] - - -# Feedback for copying output of the program in the documentation -def check_output_on_header(expected_output): - code = get_program() - expected_output = str(expected_output) - between_stars = code.split("*****")[2].strip() - between_stars = "\\n".join([x.strip() for x in between_stars.split("\\n")]) - if 'REPLACE THIS TEXT WITH THE OUTPUT OF THIS PROGRAM' in between_stars: - gently_r("In your code, you need to 'REPLACE THIS TEXT WITH THE OUTPUT OF THIS PROGRAM'", "wrong_output_blank", - label="Blank Output") - elif expected_output not in between_stars: - gently_r("The output you copied between the *****, seems to be incorrect. " - "You may have copied it into the wrong location, or it is incomplete.", "wrong_output_fill", label="") - - -def check_problem_submission(prob_id): - if prob_id not in get_program(): - explain_r("Make sure that you are turning in {}
    ".format(prob_id), "wrong_problem", label="Wrong Problem") - return True - - -def check_print_output(multiple_lines): - for line in multiple_lines: - if line not in get_output(): - gently_r("You are not doing the correct calculation
    ", "catch_all", label="Wrong Output") - return True - - -def find_in_code(regex): - code = get_program() - return re.search(regex, code) diff --git a/src/lib/pedal/toolkit/utilities.py b/src/lib/pedal/toolkit/utilities.py deleted file mode 100644 index 7e5b4fe23a..0000000000 --- a/src/lib/pedal/toolkit/utilities.py +++ /dev/null @@ -1,361 +0,0 @@ -from pedal.cait.cait_api import parse_program -from pedal.report.imperative import gently, explain -from pedal.report.imperative import gently_r, explain_r - - -def is_top_level(ast_node): - ast = parse_program() - for element in ast.body: - if element.ast_name == 'Expr': - if element.value == ast_node: - return True - elif element == ast_node: - return True - return False - - -def no_nested_function_definitions(): - ast = parse_program() - defs = ast.find_all('FunctionDef') - for a_def in defs: - if not is_top_level(a_def): - gently("You have defined a function inside of another block. For instance, you may have placed it inside " - "another function definition, or inside of a loop. Do not nest your function definition!" - "

    (nest_func)
    ") - return False - return True - - -def function_prints(): - ast = parse_program() - defs = ast.find_all('FunctionDef') - for a_def in defs: - all_calls = a_def.find_all('Call') - for a_call in all_calls: - if a_call.func.ast_name == 'Name': - if a_call.func.id == 'print': - return True - return False - - -def find_function_calls(name, root=None): - if root is None: - root = parse_program() - all_calls = root.find_all('Call') - calls = [] - for a_call in all_calls: - if a_call.func.ast_name == 'Attribute': - if a_call.func.attr == name: - calls.append(a_call) - elif a_call.func.ast_name == 'Name': - if a_call.func.id == name: - calls.append(a_call) - return calls - - -def function_is_called(name): - return len(find_function_calls(name)) - - -def no_nonlist_nums(): - pass - - -def only_printing_variables(): - ast = parse_program() - all_calls = ast.find_all('Call') - for a_call in all_calls: - if a_call.func.ast_name == 'Name' and a_call.func.id == "print": - for arg in a_call.args: - if arg.ast_name != "Name": - return False - elif arg.id in ('True', 'False', 'None'): - return False - return True - - -def find_prior_initializations(node): - if node.ast_name != "Name": - return None - ast = parse_program() - assignments = ast.find_all("Assign") - cur_line_no = node.lineno - all_assignments = [] - for assignment in assignments: - if assignment.has(node): - if assignment.lineno < cur_line_no: - all_assignments.append(assignment) - return all_assignments - - -def prevent_unused_result(): - ast = parse_program() - exprs = ast.find_all('Expr') - for expr in exprs: - if expr.value.ast_name == "Call": - a_call = expr.value - if a_call.func.ast_name == 'Attribute': - if a_call.func.attr == 'append': - pass - elif a_call.func.attr in ('replace', 'strip', 'lstrip', 'rstrip'): - gently("Remember! You cannot modify a string directly. Instead, you should assign the result back " - "to the string variable.

    (str_mutate)
    ") - - -def prevent_builtin_usage(function_names): - message = "You cannot use the builtin function {}." - code = "builtin_use" - label = "Builtin Usage" - # Prevent direction calls - ast = parse_program() - all_calls = ast.find_all('Call') - for a_call in all_calls: - if a_call.func.ast_name == 'Name': - if a_call.func.id in function_names: - explain_r(message.format(a_call.func.id), code, label=label) - return a_call.func.id - return None - - -def find_negatives(root=None): - if root is None: - root = parse_program() - return [-op.operand.n for op in root.find_all("UnaryOp") - if op.op.ast_name == "USub" and op.operand.ast_name == "Num"] - - -# TODO: UGLY HACK. This is to avoid muted=False kwargs in the following -# functions. Apparently skulpt doesn't support this syntax. -muted = False - - -def prevent_literal(*literals): - """ - Confirms that the literal is not in the code, returning False if it is not. - - Args: - *literals (Any...): A series of literal values to look for. - Returns: - AstNode or False: If the literal is found in the code, then it is returned. - """ - message = "Do not use the literal value {} in your code." - code = "hard_code" - label = "Hard Coding" - ast = parse_program() - str_values = [s.s for s in ast.find_all("Str")] - num_values = [n.n for n in ast.find_all("Num")] - negative_values = find_negatives(ast) - name_values = ([name.id for name in ast.find_all("Name")] + - [name.value for name in ast.find_all("NameConstant")]) - for literal in literals: - if isinstance(literal, (int, float)): - if literal in num_values or literal in negative_values: - if not muted: - explain_r(message.format(repr(literal)), code, label=label) - return literal - elif isinstance(literal, str): - if literal in str_values: - if not muted: - explain_r(message.format(repr(literal)), code, label=label) - return literal - elif literal in (True, False, None): - if str(literal) in name_values: - if not muted: - explain_r(message.format(repr(literal)), code, label=label) - return literal - return False - - -def ensure_literal(*literals): - """ - Confirms that the literal IS in the code, returning False if it is not. - - Args: - *literals (Any...): A series of literal values to look for. - Returns: - AstNode or False: If the literal is found in the code, then it is returned. - """ - message = "You need the literal value {} in your code." - code = "missing_literal" - label = "Missing Literal" - ast = parse_program() - str_values = [s.s for s in ast.find_all("Str")] - num_values = [n.n for n in ast.find_all("Num")] - negative_values = find_negatives(ast) - name_values = ([str(name.id) for name in ast.find_all("Name")] + - [str(name.value) for name in ast.find_all("NameConstant")]) - for literal in literals: - if literal in (True, False, None): - if str(literal) not in name_values: - if not muted: - explain_r(message.format(repr(literal)), code, label=label) - return True - elif isinstance(literal, (int, float)): - if literal not in num_values and literal not in negative_values: - if not muted: - explain_r(message.format(repr(literal)), code, label=label) - return literal - elif isinstance(literal, str): - if literal not in str_values: - if not muted: - explain_r(message.format(repr(literal)), code, label=label) - return literal - return False - - -def prevent_advanced_iteration(): - message = "You should not use a while loop to solve this problem." 
- code = "while_usage" - label = "Usage of while" - ast = parse_program() - if ast.find_all('While'): - explain_r(message, code, label=label) - prevent_builtin_usage(['sum', 'map', 'filter', 'reduce', 'len', 'max', 'min', - 'max', 'sorted', 'all', 'any', 'getattr', 'setattr', - 'eval', 'exec', 'iter']) - - -COMPARE_OP_NAMES = { - "==": "Eq", - "<": "Lt", - "<=": "Lte", - ">=": "Gte", - ">": "Gt", - "!=": "NotEq", - "is": "Is", - "is not": "IsNot", - "in": "In", - "not in": "NotIn"} -BOOL_OP_NAMES = { - "and": "And", - "or": "Or"} -BIN_OP_NAMES = { - "+": "Add", - "-": "Sub", - "*": "Mult", - "/": "Div", - "//": "FloorDiv", - "%": "Mod", - "**": "Pow", - ">>": "LShift", - "<<": "RShift", - "|": "BitOr", - "^": "BitXor", - "&": "BitAnd", - "@": "MatMult"} -UNARY_OP_NAMES = { - # "+": "UAdd", - # "-": "USub", - "not": "Not", - "~": "Invert" -} - - -def ensure_operation(op_name, root=None): - message = "You are not using the {} operator.".format(op_name) - code = "missing_op" - label = "Missing {} Operator".format(op_name) - if root is None: - root = parse_program() - result = find_operation(op_name, root) - if not result: - gently_r(message, code, label) - return result - - -def prevent_operation(op_name, root=None): - message = "You may not use the {} operator.".format(op_name) - code = "bad_op" - label = "Bad Operator".format(op_name) - if root is None: - root = parse_program() - result = find_operation(op_name, root) - if result: - gently_r(message, code, label=label) - return result - - -def find_operation(op_name, root): - if op_name in COMPARE_OP_NAMES: - compares = root.find_all("Compare") - for compare in compares: - for op in compare.ops: - if op.ast_name == COMPARE_OP_NAMES[op_name]: - return compare - elif op_name in BOOL_OP_NAMES: - boolops = root.find_all("BoolOp") - for boolop in boolops: - if boolop.op_name == BOOL_OP_NAMES[op_name]: - return boolop - elif op_name in BIN_OP_NAMES: - binops = root.find_all("BinOp") - for binop in binops: - if binop.op_name == BIN_OP_NAMES[op_name]: - return binop - elif op_name in UNARY_OP_NAMES: - unaryops = root.find_all("UnaryOp") - for unaryop in unaryops: - if unaryop.op_name == UNARY_OP_NAMES[op_name]: - return unaryop - return False - - -def ensure_recursion(function_name, root=None): - if root is None: - root = parse_program() - all_calls = root.find_all('Call') - calls = [] - for a_call in all_calls: - if a_call.func.ast_name == 'Attribute': - if a_call.func.attr == function_name: - calls.append(a_call) - elif a_call.func.ast_name == 'Name': - if a_call.func.id == function_name: - calls.append(a_call) - return calls - - -def ensure_assignment(variable_name, type=None, value=None, root=None): - """ - Consumes a variable name - TODO: Implement the value parameter - - :param variable_name: The variable name the student is expected to define. - :type variable_name: str - :param type: The string type of the node on the right side of the - assignment. Check GreenTreeSnakes (e.g., "Num", or "Str"). 
- :type type: str - :return: False or str - """ - if root is None: - root = parse_program() - assignments = root.find_all("Assign") - potentials = [] - for assign in assignments: - if assign.targets[0].ast_name != "Name": - continue - if assign.targets[0].id == variable_name: - potentials.append(assign) - if type is None: - return assign - elif (type == 'Bool' and - assign.value.ast_name == 'Name' and - assign.value.id in ('True', 'False')): - return assign - elif (type == 'Bool' and - assign.value.ast_name == 'NameConstant' and - assign.value.value in (True, False)): - return assign - elif assign.value.ast_name == type: - return assign - if potentials and potentials[0].value.ast_name not in ("Str", "Bool", "Num", "List", "Tuple"): - explain_r(("You needed to assign a literal value to {variable}, but you " - "created an expression instead.").format(variable=variable_name), "exp_vs_lit", - label="Expression Instead of Literal") - elif type is None: - explain_r(("You have not properly assigned anything to the variable " - "{variable}.").format(variable=variable_name), "no_assign", label="No Proper Assignment") - else: - explain_r(("You have not assigned a {type} to the variable {variable}." - "").format(type=type, variable=variable_name), "type_assign", label="Unexpected Variable Type") - return False
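Taken together, these toolkit helpers are meant to be called from an instructor feedback script. A minimal sketch, assuming the usual pedal grading environment has already parsed the student submission; the variable name, operator, and banned builtin below are hypothetical choices for illustration:

from pedal.toolkit.utilities import (ensure_assignment, ensure_operation,
                                     prevent_builtin_usage)

# Expect a literal numeric initialization such as `total = 0`.
ensure_assignment("total", type="Num")
# Expect the student to add values explicitly with the + operator...
ensure_operation("+")
# ...rather than reaching for the built-in sum().
prevent_builtin_usage(["sum"])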