import neo4j, json
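
# Autograder for a Neo4j (Cypher) assignment:
#   1. connect to the local Neo4j instance and load the reference dataset,
#   2. run the reference solutions from solution.sols to build canonical answers,
#   3. import the student's submission, call query1() .. queryN(), and compare
#      the post-processed results against the reference answers,
#   4. write score and feedback to /autograder/results/results.json.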

# dbprep

fsroot = '/autograder/source/'

datasets = ['Neo4J_dataset']

# a single long-lived session is reused for every query below
db = neo4j.GraphDatabase.driver('bolt://localhost:7687', auth = ('neo4j', '4Sfz541Lm')).session()


def postproc_str(data: str):  # relaxed string matching
    import re
    # lowercase, then strip whitespace, '_' and '|' so purely cosmetic
    # differences in keys and values do not affect the comparison
    return re.sub(r'[\s|_]', '', data.lower())
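
# e.g. postproc_str('Release _ Year') == 'releaseyear', so keys or values that
# differ only in case, spacing, or underscores compare equal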


def comparator(a, b):
    # only a deterministic, consistent ordering is needed: it is used solely to
    # sort results into a canonical form before comparison, so direction is irrelevant
    cmp = lambda x, y: 1 if x < y else -1 if x > y else 0
    try:
        return cmp(a, b)
    except Exception:  # operands are not directly comparable (e.g. dicts, mixed types)
        from collections.abc import Iterable

        def itcmp(a: Iterable, b: Iterable):
            # order by length first, then element-wise
            if len(a) < len(b):
                return -1
            elif len(a) == len(b):
                for aa, bb in zip(a, b):
                    c = comparator(aa, bb)
                    if c != 0:
                        return c
            else:
                return 1
            return 0

        match (a, b):
            case (dict(), dict()):
                return itcmp([*a.keys(), *a.values()], [*b.keys(), *b.values()])
            case (Iterable(), Iterable()):
                return itcmp(a, b)
            case _ if type(a) == type(b):
                return cmp(f'{a}', f'{b}')  # same type: fall back to the string form
            case _:
                return cmp(hash(type(a)), hash(type(b)))  # different types: order by type
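
# Cypher returns each row as a record dict and, without ORDER BY, row order is
# not guaranteed; sorting every result with this comparator gives the reference
# answers and the submission's answers one canonical, order-independent form.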


def postproc_iter(data):
    from collections.abc import Iterable
    from functools import cmp_to_key
    try:
        match data:
            case str():
                return postproc_str(data)
            case dict():
                return { postproc_iter(k): postproc_iter(v) for k, v in data.items() }
            case Iterable():  # canonicalise: drop empty iterables, sort, unwrap singletons
                res = type(data)(
                    sorted(
                        [postproc_iter(d) for d in data
                         if not isinstance(d, Iterable) or d],
                        key = cmp_to_key(comparator))
                )
                return res[0] if len(res) == 1 else res
            case _:  # primitives (numbers, booleans, None, ...)
                return data
    except Exception as e:  # fail-safe: post-processing must never crash the grader
        print(e)
        return data
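
# Illustration: postproc_iter([{'Name ': 'Tom Hanks'}]) == {'name': 'tomhanks'}
# (the single-record list is unwrapped and both keys and values are normalised)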


def evaluate(query: str):
    # run a Cypher query and return its canonicalised result;
    # an empty or whitespace-only query evaluates to None
    query = query.strip()
    return postproc_iter(db.run(query).data()) if query else None
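
# Illustration with a made-up query (not one of the assignment's queries):
#   evaluate('MATCH (m:Movie) RETURN count(m) AS cnt')  ->  {'cnt': <count>}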


# block until the Neo4j server is ready to accept queries
from time import sleep

while True:
    try:
        db.run('RETURN 0')
        break
    except Exception:
        sleep(1)  # server still starting up; wait instead of spinning


# load each reference dataset into the database
for d in datasets:
    with open(fsroot + d + '.txt', encoding = 'utf-8') as f:
        db.run(f.read())


# build the reference answers; a solution entry is either a single Cypher string
# or a tuple of alternative queries that are all equally acceptable
from solution import sols

answers = [evaluate(s) if type(s) is str else tuple(evaluate(k) for k in s) for s in sols]
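
# Expected shape of solution.sols (illustrative values, not the real solutions):
#   sols = [
#       'MATCH (p:Person) RETURN count(p) AS cnt',
#       ('MATCH (m:Movie) RETURN m.title', 'MATCH (m:Movie) RETURN m.title AS title'),
#   ]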


# grading

from os import listdir
from importlib.util import module_from_spec, spec_from_file_location

subroot = '/autograder/submission/'
feedback = ''
submissions = [subroot + f for f in listdir(subroot) if f.strip().lower().endswith('.py')]

grade = 0
n_queries = len(sols)


if submissions:
    submission = submissions[0]  # grade the first (normally the only) .py file

    for i in range(n_queries):
        feedback += f'Query {i + 1}: '
        try:
            # re-import the submission for every query so no state leaks between queries
            spec = spec_from_file_location('curr', submission)
            module = module_from_spec(spec)
            spec.loader.exec_module(module)
            q = getattr(module, f'query{i + 1}')()

            def eq(a, b):
                # a tuple of reference answers means any one of them is accepted
                if type(b) is tuple:
                    return any(eq(a, bb) for bb in b)
                else:
                    return a == b

            ans = evaluate(q)
            if eq(ans, answers[i]):
                grade += 1
                feedback += 'Correct.\n'
            else:
                feedback += 'Wrong Answer.\n'
                print('ans: ', ans, '\nsol: ', answers[i])
        except Exception as e:  # import failure, missing queryN(), or invalid Cypher
            feedback += 'Runtime Error.\n'
            print(e)
else:
    feedback += 'No python file in submission.\n'


# output

results = {
    'output': feedback,
    'score': round(grade * 100 / n_queries, 1),
    'max_score': 100,
}

with open('/autograder/results/results.json', 'w') as res:
    json.dump(results, res)