|
|
|
import pymongo, json
|
|
|
|
|
|
|
|
# dbprep

# Root of the autograder bundle; dataset fixtures live here as <name>.json.
fsroot = '/autograder/source/'

# Collection names to seed; one JSON fixture file per entry under fsroot.
datasets = ['congress', 'bills']

# Local mongod instance; all collections go into the 'test' database.
db = pymongo.MongoClient('mongodb://127.0.0.1')['test']
|
|
|
|
|
|
|
|
def postproc_str(data: str):  # relaxed str matching
    r"""Canonicalize a string for relaxed comparison: lowercase it, then
    strip whitespace and underscores.

    NOTE(review): inside a character class ``|`` is a literal, so the
    pattern ``[\s|_]`` also removes pipe characters — presumably
    harmless here since both solution and submission pass through it.
    """
    import re

    lowered = data.lower()
    return re.sub(r'[\s|_]', '', lowered)
|
|
|
|
|
|
|
|
def comparator(a, b):
    """Total-order comparator over arbitrary (possibly nested) values.

    Used via functools.cmp_to_key in postproc_iter to impose a
    deterministic order on heterogeneous query results so canonicalized
    answers can be compared with ==.

    NOTE(review): `cmp` returns 1 when x < y and -1 when x > y — the
    reverse of the usual convention. Grading only needs the ordering to
    be consistent, not ascending, and every sort in this file uses this
    same comparator, so the inversion is benign.
    """
    cmp = lambda x, y: 1 if x < y else -1 if x > y else 0

    try:
        # Fast path: values that support '<' / '>' directly.
        return cmp(a, b)
    except Exception as e:
        # '<' raised (mixed or unorderable types): fall back to structural rules.
        from collections.abc import Iterable

        def itcmp(a: Iterable, b: Iterable):
            # Shorter iterable sorts first; equal lengths compare element-wise.
            # NOTE(review): these length branches use the conventional -1/+1
            # signs, opposite to the inverted `cmp` above — again only
            # consistency matters, not direction.
            if len(a) < len(b):
                return -1
            elif len(a) == len(b):
                for aa, bb in zip(a, b):
                    cmp = comparator(aa, bb)  # local; shadows the outer lambda
                    if cmp != 0:
                        return cmp
            else: return 1
            return 0

        from bson import ObjectId

        match (a, b):
            case (dict(), dict()):
                # Dicts: compare keys then values as one flattened sequence.
                return itcmp([*a.keys(), *a.values()], [*b.keys(), *b.values()])
            case (ObjectId(aa), ObjectId(bb)):
                # NOTE(review): a positional class pattern requires
                # __match_args__ on ObjectId — unverified here. In practice
                # two ObjectIds order via '<' and take the fast path above,
                # so this case is unlikely to be reached; confirm.
                return cmp(aa, bb)
            case (Iterable(), Iterable()):
                return itcmp(a, b)
            case _ if type(a) == type(b):
                # Same type but unorderable: fall back to string form.
                return cmp(f'{a}', f'{b}')
            case _:
                # Different types entirely: arbitrary but deterministic
                # (within one process) order by type hash.
                return cmp(hash(type(a)), hash(type(b)))
|
|
|
|
|
|
|
|
def postproc_iter(data):
    """Canonicalize nested data for order-insensitive comparison.

    Strings are relax-normalized, dicts are rebuilt with canonicalized
    keys/values, other iterables are flattened (empty sub-iterables
    dropped, singletons unwrapped) and sorted with `comparator`.
    Primitives pass through; any failure returns the input unchanged.
    """
    from collections.abc import Iterable
    from functools import cmp_to_key

    try:
        if isinstance(data, str):
            return postproc_str(data)
        if isinstance(data, dict):
            return {postproc_iter(key): postproc_iter(val)
                    for key, val in data.items()}
        if isinstance(data, Iterable):  # flatten, remove order and empty iterables
            kept = [postproc_iter(item) for item in data
                    if not isinstance(item, Iterable) or item]
            ordered = type(data)(sorted(kept, key=cmp_to_key(comparator)))
            # Unwrap single-element containers so [x] compares equal to x.
            return ordered[0] if len(ordered) == 1 else ordered
        # primitives
        return data
    except Exception as exc:  # fail proof
        print(exc)
        return data
|
|
|
|
|
|
|
|
|
|
|
|
def evaluate(query: str):
    """Normalize a mongo-shell-style query string, run it, and return the
    canonicalized result list (None for an effectively empty query).

    Steps: quote bare keys so shell syntax is valid Python, strip
    newlines and trailing `.pretty()`, drop a trailing semicolon, then
    eval against the module-level `db` and canonicalize via postproc_iter.
    """
    import re

    # Quote bare (optionally $-prefixed) keys: {name: 1} -> {"name" : 1}.
    # NOTE(review): this also fires on string *values* containing ':' —
    # known limitation, left as-is to avoid changing grading behavior.
    query = re.sub(r'(\$?[\d\w_]+)\s*:', r'"\1" :', query)
    # Strip newlines and any `.pretty()` call.
    # FIX: was r'[\r|\n]|.\s*pretty...' — inside [] the '|' is a literal,
    # so it deleted pipe characters from queries (breaking e.g. $regex
    # alternation), and the unescaped '.' matched any character.
    query = re.sub(r'[\r\n]|\.\s*pretty\s*\(\s*\)', '', query).strip()
    if query.endswith(';'):
        query = query[:-1]
    # SECURITY: eval() on submission-derived text is dangerous in general;
    # tolerated only because this runs inside an isolated grading container.
    return postproc_iter(list(eval(query))) if query else None
|
|
|
|
|
|
|
|
# Seed the local mongo instance with each dataset fixture.
for name in datasets:
    path = fsroot + name + '.json'
    with open(path, encoding='utf-8') as fixture:
        documents = json.load(fixture)
        db[name].insert_many(documents)

# Reference answers: run each instructor solution through the same pipeline
# that student submissions will use, so comparisons are symmetric.
from solution import sols

answers = [evaluate(solution_query) for solution_query in sols]
|
|
|
|
|
|
|
|
# grading
|
|
|
|
from os import listdir
|
|
|
|
from importlib.util import module_from_spec, spec_from_file_location
|
|
|
|
# Locate the student's submitted python file(s).
subroot = '/autograder/submission/'
feedback = ''

submissions = [
    subroot + entry
    for entry in listdir(subroot)
    if entry.strip().lower().endswith('.py')
]

grade = 0
n_queries = len(sols)
|
|
|
|
|
|
|
|
if submissions:
    # Only the first .py file found is graded.
    submission = submissions[0]

    for idx in range(n_queries):
        feedback += f'Query {idx + 1}: '
        try:
            # Re-execute the submission fresh for every query so each
            # query function runs with clean module state.
            spec = spec_from_file_location('curr', submission)
            student_module = module_from_spec(spec)
            spec.loader.exec_module(student_module)

            query_fn = getattr(student_module, f'query{idx + 1}')
            result = evaluate(query_fn())

            if result == answers[idx]:
                grade += 1
                feedback += 'Correct.\n'
            else:
                feedback += 'Wrong Answer.\n'
                print('ans: ', result, '\nsol: ', answers[idx])
        except Exception as err:
            # Covers a missing queryN function, a crashing submission,
            # and a query that fails to evaluate.
            feedback += 'Runtime Error.\n'
            print(err)
else:
    feedback += 'No python file in submission.\n'
|
|
|
|
|
|
|
|
# output

# Gradescope-format results payload: textual feedback plus a 0-100 score.
results = {
    'output': feedback,
    'score': grade * 100 / n_queries,
    'max_score': 100,
}

with open('/autograder/results/results.json', 'w') as outfile:
    json.dump(results, outfile)
|