import json

import neo4j

# --- Database preparation ---------------------------------------------------
fsroot = '/autograder/source/'
datasets = ['Neo4J_dataset']

# Open a session against the local Neo4j instance used for grading.
db = neo4j.GraphDatabase.driver('bolt://localhost:7687',
                                auth=('neo4j', '4Sfz541Lm')).session()


def evaluate(query: str):
    """Run a Cypher query and return its records, or None for an empty query."""
    query = query.strip()
    return db.run(query).data() if query else None


# Wait until the database accepts connections before loading the datasets.
while True:
    try:
        db.run('RETURN 0')
        break
    except Exception:
        continue

# Load each dataset file into the database.
for d in datasets:
    with open(fsroot + d + '.txt', encoding='utf-8') as f:
        db.run(f.read())

# Reference answers: each entry of `sols` is either a single Cypher query or a
# tuple of acceptable alternative queries.
from solution import sols

answers = [evaluate(s) if isinstance(s, str) else tuple(evaluate(k) for k in s)
           for s in sols]

# --- Grading ------------------------------------------------------------------
from os import listdir
from importlib.util import module_from_spec, spec_from_file_location

subroot = '/autograder/submission/'
feedback = ''
submissions = [subroot + f for f in listdir(subroot)
               if f.strip().lower().endswith('.py')]
grade = 0
n_queries = len(sols)


def eq(a, b):
    """Compare a submission's result `a` against a reference answer `b`.

    `b` may be a tuple of acceptable results; `a` matches if it equals any of
    them, record by record.
    """
    if a is None:
        return False
    if isinstance(b, tuple):
        return any(eq(a, bb) for bb in b)
    if len(a) != len(b):
        return False
    return all(aa == bb for aa, bb in zip(a, b))


if submissions:
    submission = submissions[0]
    for i in range(n_queries):
        feedback += f'Query {i + 1}: '
        try:
            # Re-import the submission for every query so a failure in one
            # query does not affect the others.
            spec = spec_from_file_location('curr', submission)
            module = module_from_spec(spec)
            spec.loader.exec_module(module)
            q = getattr(module, f'query{i + 1}')()
            if eq(evaluate(q), answers[i]):
                grade += 1
                feedback += 'Correct.\n'
            else:
                feedback += 'Wrong Answer.\n'
        except Exception:
            feedback += 'Runtime Error.\n'
else:
    feedback += 'No python file in submission.\n'

# --- Output -------------------------------------------------------------------
results = {
    'output': feedback,
    'score': round(grade * 100 / n_queries, 1),
    'max_score': 100,
}
with open('/autograder/results/results.json', 'w') as res:
    json.dump(results, res)
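
# ---------------------------------------------------------------------------
# Reference shapes for the files this grader consumes (a sketch, not part of
# the grader itself): `solution.py` must expose `sols`, a list whose entries
# are either a single Cypher query string or a tuple of acceptable alternative
# queries, and a student submission must expose `query1()`, `query2()`, ...
# returning query strings. The concrete Cypher text below is a hypothetical
# illustration; the real assignment queries are not defined in this script.
#
#   # solution.py
#   sols = [
#       'MATCH (n) RETURN count(n) AS total',          # one accepted query
#       ('MATCH (p:Person) RETURN p.name',             # or a tuple of
#        'MATCH (p:Person) RETURN p.name AS name'),    # accepted variants
#   ]
#
#   # student submission (any *.py file in /autograder/submission/)
#   def query1():
#       return 'MATCH (n) RETURN count(n) AS total'
#
#   def query2():
#       return 'MATCH (p:Person) RETURN p.name'
# ---------------------------------------------------------------------------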