improved scratch space

Branch: master
Bill committed 2 years ago
parent 540672cdc4
commit e588e4b0dc

@@ -157,4 +157,4 @@ def get_innermost(sl):
     elif sl and type(sl) is list:
         return get_innermost(sl[0])
     else:
-        return sl
+        return sl

@@ -5,6 +5,7 @@
 #include "./server/gc.h"
 __AQEXPORT__(void) __AQ_Init_GC__(Context* cxt) {
     GC::gc_handle = static_cast<GC*>(cxt->gc);
+    GC::scratch_space = nullptr;
 }
 #else // __AQ_USE_THREADEDGC__
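
The new assignment makes every fresh GC initialization start with scratch space disabled, so allocations fall back to the heap until a code path opts in. A minimal sketch of the pattern this sets up, assuming ScratchSpace::alloc(size_t) returning void* as the usages later in this diff suggest (the aq_alloc helper is hypothetical, for illustration only):

    // Hypothetical helper showing the fallback this commit arranges:
    // draw from the per-query scratch arena when a caller has enabled it,
    // otherwise take the regular heap path.
    inline void* aq_alloc(size_t sz) {
        if (GC::scratch_space != nullptr)        // enabled around a query
            return GC::scratch_space->alloc(sz);
        return malloc(sz);                       // default after __AQ_Init_GC__
    }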

@@ -4,8 +4,8 @@ from enum import Enum, auto
 from typing import Dict, List, Optional, Set, Tuple, Union
 from engine.types import *
-from engine.utils import (base62alp, base62uuid, enlist, get_innermost,
-                          get_legal_name)
+from engine.utils import (base62alp, base62uuid, enlist,
+                          get_innermost, get_legal_name)
 from reconstruct.storage import ColRef, Context, TableInfo
 class ast_node:
@@ -599,7 +599,7 @@ class groupby_c(ast_node):
                 self.context.emitc(f'{c}.reserve({self.group}.size());')
             if col_tovec[i]: # and type(col_types[i]) is VectorT:
                 typename : Types = col_types[i] # .inner_type
-                self.context.emitc(f'auto buf_{c} = static_cast<{typename.cname} *>(malloc({self.total_sz} * sizeof({typename.cname})));')
+                self.context.emitc(f'auto buf_{c} = static_cast<{typename.cname} *>(calloc({self.total_sz}, sizeof({typename.cname})));')
                 tovec_columns.add(c)
         self.arr_len = 'arrlen_' + base62uuid(3)
         self.arr_values = 'arrvals_' + base62uuid(3)
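
Switching the emitted allocation from malloc to calloc means the vectorized per-column buffers start zero-filled, so slots a group never writes read back as 0 instead of indeterminate bytes. Roughly, the generated C++ changes shape as follows (int stands in for the concrete column type):

    // before: contents indeterminate until every slot is written
    auto buf_c = static_cast<int *>(malloc(total_sz * sizeof(int)));
    // after: all total_sz slots start at zero
    auto buf_c = static_cast<int *>(calloc(total_sz, sizeof(int)));
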
@@ -617,7 +617,8 @@ class groupby_c(ast_node):
                 f'[{preproc_scanner_it}]);'
             )
             preproc_scanner.finalize()
+        self.context.emitc(f'GC::scratch_space = GC::gc_handle ? &(GC::gc_handle->scratch) : nullptr;')
         # gscanner = scan(self, self.group, loop_style = 'for_each')
         gscanner = scan(self, self.arr_len)
         key_var = 'key_'+base62uuid(7)
@@ -683,6 +684,7 @@ class groupby_c(ast_node):
             gscanner.add(f'{ce[0]}.emplace_back({get_var_names_ex(ex)});\n')
         gscanner.finalize()
+        self.context.emitc(f'GC::scratch_space = nullptr;')
         self.datasource.groupinfo = None
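
Together, the two emitc calls bracket the group-by materialization with a scratch-space region. The generated C++ follows this shape (condensed, loop variable names illustrative):

    // enable arena allocation for the duration of the group-by loop
    GC::scratch_space = GC::gc_handle ? &(GC::gc_handle->scratch) : nullptr;
    for (uint32_t i = 0; i < arrlen; ++i) {
        // temporaries built here (e.g. vector_type buffers) draw from the arena
    }
    GC::scratch_space = nullptr;  // later allocations go back to malloc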

@@ -13,7 +13,7 @@ size_t count(const VT<T>& v) {
 }
 template <class T>
-constexpr static inline size_t count(const T&) { return 1; }
+constexpr static size_t count(const T&) { return 1; }
 // TODO: Specializations for dt/str/none
 template<class T, template<typename ...> class VT>
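
Dropping inline here loses nothing: a constexpr function is implicitly inline since C++11, so the two spellings below declare the same thing.

    constexpr static inline size_t count_a(const int&) { return 1; } // redundant inline
    constexpr static size_t count_b(const int&) { return 1; }        // identical meaning
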
@@ -441,14 +441,12 @@ inline decayed_t<VT, T> aggnext(const VT<T>& arr) {
 template<class T, template<typename ...> class VT>
 T last(const VT<T>& arr) {
     if (!arr.size) return 0;
-    const uint32_t& len = arr.size;
     return arr[arr.size - 1];
 }
 template<class T, template<typename ...> class VT>
 T first(const VT<T>& arr) {
     if (!arr.size) return 0;
-    const uint32_t& len = arr.size;
     return arr[0];
 }
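
The deleted len locals were dead code: both functions index with arr.size directly, so the references were never read. One caveat, unchanged by this commit, is that the empty-input sentinel is ambiguous for callers:

    vector_type<int> v(0u);
    auto x = last(v); // 0: the empty-input sentinel, indistinguishable from a
                      // vector whose final element is genuinely 0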

@@ -1,4 +1,5 @@
 #pragma once
+#include <atomic>
 class ScratchSpace {
 public:
@@ -35,10 +36,8 @@
 #ifndef __AQ_USE_THREADEDGC__
-#include <atomic>
 class GC {
-private:;
+private:
     size_t max_slots,
         interval, forced_clean,
         forceclean_timer = 0;
@@ -53,7 +52,6 @@ private:;
     std::atomic<uint64_t> current_size;
     volatile bool lock;
     using gc_deallocator_t = void (*)(void*);
-    ScratchSpace scratch;
     // maybe use volatile std::thread::id instead
 protected:
     void acquire_lock();
@@ -64,6 +62,7 @@ protected:
     void terminate_daemon();
 public:
+    ScratchSpace scratch;
     void reg(void* v, uint32_t sz = 0xffffffff,
         void(*f)(void*) = free
     );
@@ -92,6 +91,7 @@ public:
     }
     static GC* gc_handle;
+    static ScratchSpace *scratch_space;
     template <class T>
     static inline gc_deallocator_t _delete(T*) {
         return [](void* v){

@@ -468,6 +468,7 @@ void GC::reg(void* v, uint32_t sz, void(*f)(void*)) { //~ 40ns expected v. free
 #endif
 inline GC* GC::gc_handle = nullptr;
+inline ScratchSpace* GC::scratch_space = nullptr;
 void ScratchSpace::init(size_t initial_capacity) {
     ret = nullptr;
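
Defining the new static member as a C++17 inline variable keeps the definition in the header next to gc_handle without risking duplicate-symbol link errors. The idiom in isolation:

    // header-only static member definition (C++17)
    struct S {
        static int* p;          // declaration inside the class
    };
    inline int* S::p = nullptr; // definition may live in the header itself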

@@ -49,20 +49,25 @@ public:
         this->container = vt.container;
         // puts("move");
         vt.size = vt.capacity = 0;
-        vt.container = 0;
+        vt.container = nullptr;
     }
 public:
     _Ty* container;
     uint32_t size, capacity;
     typedef _Ty* iterator_t;
     typedef std::conditional_t<is_cstr<_Ty>(), astring_view, _Ty> value_t;
-    vector_type(const uint32_t& size) : size(size), capacity(size) {
+    explicit vector_type(const uint32_t& size) : size(size), capacity(size) {
+        if (GC::scratch_space != nullptr) [[likely]] {
+            container = (_Ty*)GC::scratch_space->alloc(size * sizeof(_Ty));
+        }
+        else
         container = (_Ty*)malloc(size * sizeof(_Ty));
         // TODO: calloc for objects.
     }
-    constexpr vector_type(std::initializer_list<_Ty> _l) {
+    explicit constexpr vector_type(std::initializer_list<_Ty> _l) {
         size = capacity = _l.size();
-        _Ty* _container = this->container = (_Ty*)malloc(sizeof(_Ty) * _l.size());
+        this->container = (_Ty*)malloc(sizeof(_Ty) * capacity);
+        _Ty* _container = this->container;
         for (const auto& l : _l) {
             *(_container++) = l;
         }
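
Two things change in the sizing constructor: it now prefers the scratch arena when a query has enabled one, and it is explicit, so integers no longer convert silently into vectors. What the latter means for callers, with a hypothetical consume function:

    void consume(vector_type<int> v);

    consume(vector_type<int>(8u)); // OK: direct initialization, 8 elements
    // consume(8u);                // no longer compiles once the ctor is explicit
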
@@ -80,8 +85,9 @@ public:
     constexpr vector_type(vector_type<_Ty>&& vt) noexcept : capacity(0) {
         _move(std::move(vt));
     }
-    vector_type(vectortype_cstorage vt) noexcept : capacity(vt.capacity), size(vt.size), container((_Ty*)vt.container) {
-        out(10);
+    explicit vector_type(vectortype_cstorage vt) noexcept :
+        capacity(vt.capacity), size(vt.size), container((_Ty*)vt.container) {
+        // out(10);
     };
     // size >= capacity ==> readonly vector
     constexpr vector_type(const uint32_t size, void* data) :
@@ -499,7 +505,7 @@ public:
     }
     inline void hashtable_push(Key&& k, uint32_t i){
-        reversemap[i] = ankerl::unordered_dense::set<Key, Hash>::hashtable_push(std::forward<Key&&>(k));
+        reversemap[i] = ankerl::unordered_dense::set<Key, Hash>::hashtable_push(std::move(k));
         ++ht_base[reversemap[i]];
     }
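
Because Key is fixed by the enclosing class template rather than deduced at the call site, the parameter Key&& k is a plain rvalue reference: std::forward<Key&&>(k) collapses to exactly std::move(k), and the replacement simply states that intent. A standalone contrast, assuming nothing from this codebase:

    #include <string>
    #include <utility>

    template <class Key>
    struct bucket {
        Key stored;
        // Key is fixed by the class, so k is not a forwarding reference;
        // move is the idiomatic spelling of the same cast.
        void push(Key&& k) { stored = std::move(k); }
    };

    int main() {
        bucket<std::string> b;
        std::string s = "grp";
        b.push(std::move(s)); // caller must explicitly hand over an rvalue
    }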
