import json
import re

import pymongo

# dbprep
fsroot = '/autograder/source/'
datasets = ['congress', 'bills']
db = pymongo.MongoClient('mongodb://127.0.0.1')['test']


def evaluate(query: str):
    """Evaluate a Mongo-shell-style query string against the local database.

    Returns the first result document, or None for an empty query.
    """
    # Quote bare keys (including $-operators) so the shell-style query
    # becomes valid Python: {chamber: 'Senate'} -> {"chamber" : 'Senate'}.
    query = re.sub(r'(\$?\w+)\s*:', r'"\1" :', query)
    # Strip newlines and any trailing .pretty() call (shell-only syntax
    # that pymongo does not understand).
    query = re.sub(r'[\r\n]|\.\s*pretty\s*\(\s*\)', '', query).strip()
    # Only the first result document is compared during grading.
    return list(eval(query))[0] if query else None


# Load each dataset into a collection of the same name.
for d in datasets:
    with open(fsroot + d + '.json', encoding='utf-8') as f:
        db[d].insert_many(json.load(f))

# Reference answers: sols is a list of query strings defined in solution.py.
from solution import sols

answers = [evaluate(s) for s in sols]

# grading
from os import listdir
from importlib.util import module_from_spec, spec_from_file_location

subroot = '/autograder/submission/'
feedback = ''
submissions = [subroot + f for f in listdir(subroot) if f.strip().lower().endswith('.py')]
grade = 0
n_queries = len(sols)

if submissions:
    # Grade the first .py file found in the submission directory.
    submission = submissions[0]
    for i in range(n_queries):
        feedback += f'Query {i + 1}: '
        try:
            # Re-execute the submission for each query so that a failure in
            # one query{n}() does not poison the remaining queries.
            spec = spec_from_file_location('curr', submission)
            module = module_from_spec(spec)
            spec.loader.exec_module(module)
            q = getattr(module, f'query{i + 1}')()
            if evaluate(q) == answers[i]:
                grade += 1
                feedback += 'Correct.\n'
            else:
                feedback += 'Wrong Answer.\n'
        except Exception:
            feedback += 'Runtime Error.\n'
else:
    feedback += 'No python file in submission.\n'

# output
results = {
    'output': feedback,
    'score': grade * 100 / n_queries,
    'max_score': 100,
}
with open('/autograder/results/results.json', 'w') as res:
    json.dump(results, res)
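
# For reference, a minimal sketch of the two inputs this grader assumes.
# The file name solution.py, the sols list, and the query{n}() entry points
# are taken from the code above; the query strings themselves are
# hypothetical examples against the congress/bills collections.
#
#   /autograder/source/solution.py -- one shell-style query string per task:
#
#       sols = [
#           "db.congress.find({chamber: 'Senate'})",
#           "db.bills.find({status: 'PASSED'}).pretty()",
#       ]
#
#   A submission is any .py file in /autograder/submission/ that defines
#   query1(), query2(), ..., each returning such a query string:
#
#       def query1():
#           return "db.congress.find({chamber: 'Senate'})"
#
#       def query2():
#           return "db.bills.find({status: 'PASSED'})"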