improved scratch space

branch: master
Bill, 2 years ago
parent 540672cdc4
commit e588e4b0dc

@@ -157,4 +157,4 @@ def get_innermost(sl):
     elif sl and type(sl) is list:
         return get_innermost(sl[0])
     else:
         return sl

@ -5,6 +5,7 @@
#include "./server/gc.h" #include "./server/gc.h"
__AQEXPORT__(void) __AQ_Init_GC__(Context* cxt) { __AQEXPORT__(void) __AQ_Init_GC__(Context* cxt) {
GC::gc_handle = static_cast<GC*>(cxt->gc); GC::gc_handle = static_cast<GC*>(cxt->gc);
GC::scratch_space = nullptr;
} }
#else // __AQ_USE_THREADEDGC__ #else // __AQ_USE_THREADEDGC__
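
Note: clearing GC::scratch_space when a context is bound means a freshly
initialized module starts with no arena active, so any allocation that consults
the pointer falls back to plain malloc until the generated group-by code
installs one. A minimal sketch of that gating logic; ScratchSpace, GC, and
aq_alloc here are hypothetical stand-ins for the real gc.h types, not the
actual API:

    #include <cstdlib>
    struct ScratchSpace { void* alloc(size_t sz); };    // bump allocator (assumed)
    struct GC { static ScratchSpace* scratch_space; };  // stand-in for the real GC
    ScratchSpace* GC::scratch_space = nullptr;          // reset on every init

    // nullptr means "no arena bound": take the malloc path.
    void* aq_alloc(size_t sz) {
        return GC::scratch_space ? GC::scratch_space->alloc(sz) : malloc(sz);
    }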

@ -4,8 +4,8 @@ from enum import Enum, auto
from typing import Dict, List, Optional, Set, Tuple, Union from typing import Dict, List, Optional, Set, Tuple, Union
from engine.types import * from engine.types import *
from engine.utils import (base62alp, base62uuid, enlist, get_innermost, from engine.utils import (base62alp, base62uuid, enlist,
get_legal_name) get_innermost, get_legal_name)
from reconstruct.storage import ColRef, Context, TableInfo from reconstruct.storage import ColRef, Context, TableInfo
class ast_node: class ast_node:
@@ -599,7 +599,7 @@ class groupby_c(ast_node):
             self.context.emitc(f'{c}.reserve({self.group}.size());')
             if col_tovec[i]: # and type(col_types[i]) is VectorT:
                 typename : Types = col_types[i] # .inner_type
-                self.context.emitc(f'auto buf_{c} = static_cast<{typename.cname} *>(malloc({self.total_sz} * sizeof({typename.cname})));')
+                self.context.emitc(f'auto buf_{c} = static_cast<{typename.cname} *>(calloc({self.total_sz}, sizeof({typename.cname})));')
                 tovec_columns.add(c)
         self.arr_len = 'arrlen_' + base62uuid(3)
         self.arr_values = 'arrvals_' + base62uuid(3)
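
Note: the gather buffer switches from malloc to calloc, so slots for groups
that never receive a row read as zero rather than indeterminate memory, and
implementations guard the count * size product against overflow. Presumably
that is the motivation; a minimal contrast (compare is a hypothetical helper):

    #include <cstdlib>
    void compare(size_t n) {
        int* a = static_cast<int*>(malloc(n * sizeof(int))); // contents indeterminate
        int* b = static_cast<int*>(calloc(n, sizeof(int)));  // every element reads 0
        free(a);
        free(b);
    }
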
@@ -617,7 +617,8 @@
                 f'[{preproc_scanner_it}]);'
             )
             preproc_scanner.finalize()
+        self.context.emitc(f'GC::scratch_space = GC::gc_handle ? &(GC::gc_handle->scratch) : nullptr;')
         # gscanner = scan(self, self.group, loop_style = 'for_each')
         gscanner = scan(self, self.arr_len)
         key_var = 'key_'+base62uuid(7)
@ -683,6 +684,7 @@ class groupby_c(ast_node):
gscanner.add(f'{ce[0]}.emplace_back({get_var_names_ex(ex)});\n') gscanner.add(f'{ce[0]}.emplace_back({get_var_names_ex(ex)});\n')
gscanner.finalize() gscanner.finalize()
self.context.emitc(f'GC::scratch_space = nullptr;')
self.datasource.groupinfo = None self.datasource.groupinfo = None
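
Note: the two emitc() calls bracket the aggregation scan, so vector_type
temporaries constructed inside the generated loop draw from the GC's scratch
arena, and the pointer is dropped again before any results escape the group-by.
Roughly the shape of the emitted C++; groupby_scan and arrlen are placeholders
for the uuid-suffixed names the codegen produces, not actual output:

    void groupby_scan(uint32_t arrlen) {            // sketch of generated shape
        GC::scratch_space = GC::gc_handle ? &(GC::gc_handle->scratch) : nullptr;
        for (uint32_t i = 0; i < arrlen; ++i) {
            // per-group aggregation; temporary buffers bump-allocate
            // from the arena instead of hitting malloc per vector
        }
        GC::scratch_space = nullptr;                // later allocations use malloc
    }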

@ -13,7 +13,7 @@ size_t count(const VT<T>& v) {
} }
template <class T> template <class T>
constexpr static inline size_t count(const T&) { return 1; } constexpr static size_t count(const T&) { return 1; }
// TODO: Specializations for dt/str/none // TODO: Specializations for dt/str/none
template<class T, template<typename ...> class VT> template<class T, template<typename ...> class VT>
@ -441,14 +441,12 @@ inline decayed_t<VT, T> aggnext(const VT<T>& arr) {
template<class T, template<typename ...> class VT> template<class T, template<typename ...> class VT>
T last(const VT<T>& arr) { T last(const VT<T>& arr) {
if (!arr.size) return 0; if (!arr.size) return 0;
const uint32_t& len = arr.size;
return arr[arr.size - 1]; return arr[arr.size - 1];
} }
template<class T, template<typename ...> class VT> template<class T, template<typename ...> class VT>
T first(const VT<T>& arr) { T first(const VT<T>& arr) {
if (!arr.size) return 0; if (!arr.size) return 0;
const uint32_t& len = arr.size;
return arr[0]; return arr[0];
} }

@@ -1,4 +1,5 @@
 #pragma once
+#include <atomic>

 class ScratchSpace {
 public:
@@ -35,10 +36,8 @@ public:

 #ifndef __AQ_USE_THREADEDGC__
-#include <atomic>
-
 class GC {
-private:;
+private:
     size_t max_slots,
         interval, forced_clean,
         forceclean_timer = 0;
@@ -53,7 +52,6 @@ private:;
     std::atomic<uint64_t> current_size;
     volatile bool lock;
     using gc_deallocator_t = void (*)(void*);
-    ScratchSpace scratch;
     // maybe use volatile std::thread::id instead
 protected:
     void acquire_lock();
@ -64,6 +62,7 @@ protected:
void terminate_daemon(); void terminate_daemon();
public: public:
ScratchSpace scratch;
void reg(void* v, uint32_t sz = 0xffffffff, void reg(void* v, uint32_t sz = 0xffffffff,
void(*f)(void*) = free void(*f)(void*) = free
); );
@ -92,6 +91,7 @@ public:
} }
static GC* gc_handle; static GC* gc_handle;
static ScratchSpace *scratch_space;
template <class T> template <class T>
static inline gc_deallocator_t _delete(T*) { static inline gc_deallocator_t _delete(T*) {
return [](void* v){ return [](void* v){

@@ -468,6 +468,7 @@ void GC::reg(void* v, uint32_t sz, void(*f)(void*)) { //~ 40ns expected v. free
 #endif

 inline GC* GC::gc_handle = nullptr;
+inline ScratchSpace* GC::scratch_space = nullptr;

 void ScratchSpace::init(size_t initial_capacity) {
     ret = nullptr;
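
Note: GC::scratch_space is defined the same way as GC::gc_handle, as a C++17
inline variable, so the definition can live in the header and every translation
unit that includes it shares a single object without ODR violations. A minimal
illustration with hypothetical names:

    // widget.h
    struct Widget {
        static Widget* instance;                // declaration; needs a definition
    };
    inline Widget* Widget::instance = nullptr; // C++17: header-safe single definition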

@@ -49,20 +49,25 @@ public:
         this->container = vt.container;
         // puts("move");
         vt.size = vt.capacity = 0;
-        vt.container = 0;
+        vt.container = nullptr;
     }
 public:
     _Ty* container;
     uint32_t size, capacity;
     typedef _Ty* iterator_t;
     typedef std::conditional_t<is_cstr<_Ty>(), astring_view, _Ty> value_t;
-    vector_type(const uint32_t& size) : size(size), capacity(size) {
+    explicit vector_type(const uint32_t& size) : size(size), capacity(size) {
+        if (GC::scratch_space != nullptr) [[likely]] {
+            container = (_Ty*)GC::scratch_space->alloc(size * sizeof(_Ty));
+            return;
+        }
         container = (_Ty*)malloc(size * sizeof(_Ty));
         // TODO: calloc for objects.
     }
-    constexpr vector_type(std::initializer_list<_Ty> _l) {
+    explicit constexpr vector_type(std::initializer_list<_Ty> _l) {
         size = capacity = _l.size();
-        _Ty* _container = this->container = (_Ty*)malloc(sizeof(_Ty) * _l.size());
+        this->container = (_Ty*)malloc(sizeof(_Ty) * capacity);
+        _Ty* _container = this->container;
         for (const auto& l : _l) {
             *(_container++) = l;
         }
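
Note: the sized constructor now prefers the arena and falls back to malloc only
when no scratch space is bound; the early return keeps the two paths exclusive,
and [[likely]] marks the arena path as the hot one during scans. Since the
constructor is now explicit, implicit conversions from integers stop compiling,
as this hypothetical call site shows:

    vector_type<int> v(1024u);      // fine: explicit construction; arena-backed
                                    // whenever GC::scratch_space is set
    // vector_type<int> w = 1024u;  // no longer compiles: ctor is explicit
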
@@ -80,8 +85,9 @@ public:
     constexpr vector_type(vector_type<_Ty>&& vt) noexcept : capacity(0) {
         _move(std::move(vt));
     }
-    vector_type(vectortype_cstorage vt) noexcept : capacity(vt.capacity), size(vt.size), container((_Ty*)vt.container) {
-        out(10);
+    explicit vector_type(vectortype_cstorage vt) noexcept :
+        capacity(vt.capacity), size(vt.size), container((_Ty*)vt.container) {
+        // out(10);
     };
     // size >= capacity ==> readonly vector
     constexpr vector_type(const uint32_t size, void* data) :
@@ -499,7 +505,7 @@ public:
     }

     inline void hashtable_push(Key&& k, uint32_t i){
-        reversemap[i] = ankerl::unordered_dense::set<Key, Hash>::hashtable_push(std::forward<Key&&>(k));
+        reversemap[i] = ankerl::unordered_dense::set<Key, Hash>::hashtable_push(std::move(k));
         ++ht_base[reversemap[i]];
     }
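
Note: inside hashtable_push, k is a named rvalue reference of a concrete type
(Key is fixed by the enclosing class template), not a forwarding reference, so
std::move is the idiomatic cast; std::forward<Key&&> compiled to the same thing
but implied perfect forwarding that was never happening. A self-contained
illustration, with std::string standing in for Key and sink/push as
hypothetical helpers:

    #include <string>
    #include <utility>

    void sink(std::string&& s) { std::string taken = std::move(s); }

    void push(std::string&& k) {      // k: named rvalue reference, not deduced
        sink(std::move(k));           // idiomatic unconditional rvalue cast
        // sink(std::forward<std::string&&>(k));  // equivalent here, but misleading
    }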
