# autograders/neo4j/source/autograder.py

import neo4j, json, re
print('ver: 1.33')
# dbprep
fsroot = '/autograder/source/'
datasets = ['databaseCreateQueries']
db = neo4j.GraphDatabase.driver('bolt://localhost:7687', auth = ('neo4j', '4Sfz541Lm')).session()
points = (5,)*10 # 5 points per query, 10 queries expected
def postproc_str(data : str): # relaxed str matching: lowercase, drop whitespace, '|', '_' and '.'
    return re.sub(r'[\s|_|.]', '', data.lower())
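# Example (illustrative, not executed by the grader): postproc_str('Number Of Continents')
# yields 'numberofcontinents' and postproc_str('e.family') yields 'efamily', so cosmetic
# differences in spacing, case and punctuation do not affect matching.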
def postproc_keys(data : str): # fold known column-name variants into one canonical key
    if type(data) is not str:
        return postproc_iter(data)
    _dict = {r'count\(\*\)': 'cnt',
             r'efamily': 'family',
             r'sname': 'aname',
             r'size\(continents\)': 'numberofcontinent'}
    data = postproc_str(data)
    for k, v in _dict.items():
        data = re.sub(k, v, data)
    return data
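# Example (illustrative, not executed by the grader): postproc_keys('Count(*)') yields 'cnt'
# and postproc_keys('size(Continents)') yields 'numberofcontinent', so differently spelled
# but equivalent result columns compare equal; non-string keys are handed to postproc_iter,
# which is defined below.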
def comparator(a, b):
    cmp = lambda x, y: 1 if x < y else -1 if x > y else 0
    try:
        return cmp(a, b)
    except Exception: # unorderable mixed types
        from collections.abc import Iterable
        def itcmp(a: Iterable, b: Iterable):
            if len(a) < len(b):
                return -1
            elif len(a) == len(b):
                for aa, bb in zip(a, b):
                    c = comparator(aa, bb)
                    if c != 0:
                        return c
            else: return 1
            return 0
        match (a, b):
            case (dict(), dict()):
                return itcmp([*a.keys(), *a.values()], [*b.keys(), *b.values()])
            case (Iterable(), Iterable()):
                return itcmp(a, b)
            case _ if type(a) == type(b):
                return cmp(f'{a}', f'{b}')
            case _:
                return cmp(hash(type(a)), hash(type(b)))
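# For reference (illustrative only): comparator(1, 2) == 1 and comparator(2, 1) == -1, so
# sorted(values, key=functools.cmp_to_key(comparator)) orders values descending; only
# consistency matters, since it is used solely to canonicalise element order. Unorderable
# mixed types fall back to the match block: dicts by their keys and values, other
# iterables element-wise, same-typed values by string form, and otherwise by type.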
def postproc_iter(data, naive = False):
    from collections.abc import Iterable
    from functools import cmp_to_key
    try:
        match data:
            case str():
                return postproc_str(data)
            case dict():
                return {
                    (i if naive else postproc_keys(k)) : postproc_iter(v, naive)
                    for i, (k, v) in enumerate(data.items())
                }
            case Iterable(): # flatten, remove order and empty iterables
                res = type(data)(
                    sorted(
                        [postproc_iter(d, naive) for d in data
                         if not isinstance(d, Iterable) or d],
                        key = cmp_to_key(comparator))
                )
                return res[0] if len(res) == 1 else res
            case _: # primitives
                return data
    except Exception as e: # fail proof
        print(e)
        return data
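# Example (illustrative, not executed by the grader): for a Result.data() payload such as
# [{'count(*)': 3}], postproc_iter returns {'cnt': 3} with naive=False (column names
# normalised) and {0: 3} with naive=True (column names ignored); single-element lists are
# unwrapped and element order is discarded by sorting.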
def evaluate(query : str):
    query = query.strip()
    return [postproc_iter(db.run(query).data(), n)
            if query else None for n in (False, True)]
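# Note: evaluate runs the query once per variant and returns the pair
# [result with normalised column names, result with column names ignored];
# an empty query string yields [None, None].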
while True: # block until the Neo4j server accepts connections
    try:
        db.run('RETURN 0')
        break
    except Exception:
        continue
for d in datasets: # load the grading dataset(s) into the database
    with open(fsroot + d + '.txt', encoding = 'utf-8') as f:
        db.run(f.read())
from solution import sols
answers = [evaluate(s) if type(s) is str else tuple(evaluate(k) for k in s) for s in sols]
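# Judging by the comprehension above, each entry of sols in solution.py is either a single
# Cypher query string or a tuple of accepted alternative query strings; the reference
# answers keep the same [normalised, naive] pair shape that evaluate produces.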
# grading
from os import listdir
from importlib.util import module_from_spec, spec_from_file_location
subroot = '/autograder/submission/'
feedback = ''
submissions = [subroot + f for f in listdir(subroot) if f.strip().lower().endswith('.py')]
grade = 0
n_queries = len(sols)
wa = rte = False
if submissions:
    submission = submissions[0]
    for i in range(n_queries):
        feedback += f'Query {i + 1}: '
        try:
            spec = spec_from_file_location('curr', submission)
            module = module_from_spec(spec)
            spec.loader.exec_module(module)
            q = getattr(module, f'query{i + 1}')()
            def eq(a: list, b): # b may be a tuple of accepted alternative answers
                if type(b) is tuple:
                    return any(eq(a, bb) for bb in b)
                else:
                    return any(aa == bb for aa, bb in zip(a, b))
            ans = evaluate(q)
            if eq(ans, answers[i]):
                grade += points[i]
                feedback += 'Correct.\n'
            else:
                wa = True
                feedback += 'Wrong Answer.\n'
                print('ans: ', ans, '\nsol: ', answers[i])
        except Exception as e:
            rte = True
            feedback += 'Runtime Error.\n'
            print(e)
else:
    feedback += 'No python file in submission.\n'
if rte:
    feedback += ('\nPlease check for syntax errors if you encountered runtime errors.\n' +
                 'If you believe it\'s a mistake, contact the TA at sun1226@purdue.edu for assistance.')
# output
results = {
    'output': feedback,
    'score': grade,
    'max_score': sum(points),
}
with open('/autograder/results/results.json', 'w') as res:
    json.dump(results, res)
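# For reference (illustrative only): with every query correct, the written
# /autograder/results/results.json would look roughly like
#   {"output": "Query 1: Correct.\n...", "score": 50, "max_score": 50}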